xref: /aosp_15_r20/frameworks/native/libs/binder/tests/schd-dbg.cpp (revision 38e8c45f13ce32b0dcecb25141ffecaf386fa17f)
#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
// Included explicitly for types used directly below (Parcel, ProcessState).
#include <binder/Parcel.h>
#include <binder/ProcessState.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

#include <chrono>
#include <iomanip>
#include <iostream>
#include <tuple>
#include <vector>

#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fstream>

using namespace std;
using namespace android;

enum BinderWorkerServiceCode {
  BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
};

#define ASSERT(cond)                                                \
  do {                                                              \
    if (!(cond)) {                                                  \
      cerr << __func__ << ":" << __LINE__ << " condition:" << #cond \
           << " failed\n"                                           \
           << endl;                                                 \
      exit(EXIT_FAILURE);                                           \
    }                                                               \
  } while (0)

vector<sp<IBinder> > workers;

// A pair is reported as "GOOD" when the fraction of transactions in which the
// service ran on the same CPU as its caller exceeds GOOD_SYNC_MIN; otherwise
// it is reported as "POOR".
#define GOOD_SYNC_MIN (0.6)

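// number of significant digits printed for each latency value in Results::dump()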
#define DUMP_PRESICION 2

string trace_path = "/sys/kernel/debug/tracing";

// default values; no_process and iterations can be overridden with the
// -pair and -i command-line flags below
int no_process = 2;
int iterations = 100;
int payload_size = 16;
int no_inherent = 0;
int no_sync = 0;
int verbose = 0;
int trace;

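// ftrace helpers: traceIsOn() reads, and traceStop() clears, the
// <trace_path>/tracing_on switch (used with the -trace option below).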
bool traceIsOn() {
  fstream file;
  file.open(trace_path + "/tracing_on", ios::in);
  char on;
  file >> on;
  file.close();
  return on == '1';
}

void traceStop() {
  ofstream file;
  file.open(trace_path + "/tracing_on", ios::out | ios::trunc);
  file << '0' << endl;
  file.close();
}

// the deadline latency (in microseconds) that we are interested in;
// transactions slower than this count as deadline misses
uint64_t deadline_us = 2500;

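// scheduling priority of the calling thread (0 for SCHED_OTHER threads)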
int thread_pri() {
  struct sched_param param;
  int policy;
  ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
  return param.sched_priority;
}

void thread_dump(const char* prefix) {
  struct sched_param param;
  int policy;
  if (!verbose) return;
  cout << "--------------------------------------------------" << endl;
  cout << setw(12) << left << prefix << " pid: " << getpid()
       << " tid: " << gettid() << " cpu: " << sched_getcpu() << endl;
  ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
  string s = (policy == SCHED_OTHER)
                 ? "SCHED_OTHER"
                 : (policy == SCHED_FIFO)
                       ? "SCHED_FIFO"
                       : (policy == SCHED_RR) ? "SCHED_RR" : "???";
  cout << setw(12) << left << s << param.sched_priority << endl;
  return;
}

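// Server-side service. It answers BINDER_NOP transactions and reports back
// whether the binder thread ran at a different priority (no inheritance) or
// on a different CPU than the caller.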
class BinderWorkerService : public BBinder {
 public:
  BinderWorkerService() {
  }
  ~BinderWorkerService() {
  }
  virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
                              uint32_t flags = 0) {
    (void)flags;
    (void)data;
    (void)reply;
    switch (code) {
      // The transaction format is:
      //
      // data[in]:  int32: caller priority
      //            int32: caller cpu
      //
      // reply[out]: int32: 1 if caller's priority != callee's priority
      //             int32: 1 if caller's cpu != callee's cpu
      //
      // Note that the caller cpu read here is not always accurate: the caller
      // may still get switched out right after it reads its cpu number and
      // before the transaction is delivered.
      case BINDER_NOP: {
        thread_dump("binder");
        int priority = thread_pri();
        int priority_caller = data.readInt32();
        int h = 0, s = 0;
        if (priority_caller != priority) {
          h++;
          if (verbose) {
            cout << "err priority_caller:" << priority_caller
                 << ", priority:" << priority << endl;
          }
        }
        if (priority == sched_get_priority_max(SCHED_FIFO)) {
          int cpu = sched_getcpu();
          int cpu_caller = data.readInt32();
          if (cpu != cpu_caller) {
            s++;
          }
        }
        reply->writeInt32(h);
        reply->writeInt32(s);
        return NO_ERROR;
      }
      default:
        return UNKNOWN_TRANSACTION;
    };
  }
};

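// A pair of unix pipes used for parent/child synchronization. createPipePair()
// returns two connected endpoints; signal()/wait() exchange a one-byte token,
// send()/recv() exchange a typed value.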
class Pipe {
  int m_readFd;
  int m_writeFd;
  Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {
  }
  Pipe(const Pipe&) = delete;
  Pipe& operator=(const Pipe&) = delete;
  Pipe& operator=(const Pipe&&) = delete;

 public:
  Pipe(Pipe&& rval) noexcept {
    m_readFd = rval.m_readFd;
    m_writeFd = rval.m_writeFd;
    rval.m_readFd = 0;
    rval.m_writeFd = 0;
  }
  ~Pipe() {
    if (m_readFd) close(m_readFd);
    if (m_writeFd) close(m_writeFd);
  }
  void signal() {
    bool val = true;
    int error = write(m_writeFd, &val, sizeof(val));
    ASSERT(error >= 0);
  };
  void wait() {
    bool val = false;
    int error = read(m_readFd, &val, sizeof(val));
    ASSERT(error >= 0);
  }
  template <typename T>
  void send(const T& v) {
    int error = write(m_writeFd, &v, sizeof(T));
    ASSERT(error >= 0);
  }
  template <typename T>
  void recv(T& v) {
    int error = read(m_readFd, &v, sizeof(T));
    ASSERT(error >= 0);
  }
  static tuple<Pipe, Pipe> createPipePair() {
    int a[2];
    int b[2];

    int error1 = pipe(a);
    int error2 = pipe(b);
    ASSERT(error1 >= 0);
    ASSERT(error2 >= 0);

    return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
  }
};

typedef chrono::time_point<chrono::high_resolution_clock> Tick;

static inline Tick tickNow() {
  return chrono::high_resolution_clock::now();
}

static inline uint64_t tickNano(Tick& sta, Tick& end) {
  return uint64_t(chrono::duration_cast<chrono::nanoseconds>(end - sta).count());
}

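// Latency accumulator for one caller type. add_time() tracks best/worst/total
// latency and counts deadline misses; when tracing is enabled, the first miss
// stops the ftrace capture and exits so the trace around the slow transaction
// is preserved.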
struct Results {
  uint64_t m_best = 0xffffffffffffffffULL;
  uint64_t m_worst = 0;
  uint64_t m_transactions = 0;
  uint64_t m_total_time = 0;
  uint64_t m_miss = 0;
  bool tracing;
  explicit Results(bool _tracing) : tracing(_tracing) {
  }
  inline bool miss_deadline(uint64_t nano) {
    return nano > deadline_us * 1000;
  }
  void add_time(uint64_t nano) {
    m_best = min(nano, m_best);
    m_worst = max(nano, m_worst);
    m_transactions += 1;
    m_total_time += nano;
    if (miss_deadline(nano)) m_miss++;
    if (miss_deadline(nano) && tracing) {
      // Multiple process pairs may run the test concurrently. Each of them
      // can reach this point; only the first one actually stops the trace,
      // and any traceStop() after that has no effect.
      traceStop();
      cout << endl;
      cout << "deadline triggered: halt & stop trace" << endl;
      cout << "log:" + trace_path + "/trace" << endl;
      cout << endl;
      exit(1);
    }
  }
  void dump() {
    double best = (double)m_best / 1.0E6;
    double worst = (double)m_worst / 1.0E6;
    double average = (double)m_total_time / m_transactions / 1.0E6;
    // TODO: libjson?
    int W = DUMP_PRESICION + 2;
    cout << setprecision(DUMP_PRESICION) << "{ \"avg\":" << setw(W) << left
         << average << ",\"wst\":" << setw(W) << left << worst
         << ",\"bst\":" << setw(W) << left << best << ",\"miss\":" << left
         << m_miss << ",\"meetR\":" << left << setprecision(DUMP_PRESICION + 3)
         << (1.0 - (double)m_miss / m_transactions) << "}";
  }
};

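// Service names are "binderWorker0", "binderWorker1", ..., one per worker process.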
String16 generateServiceName(int num) {
  char num_str[32];
  snprintf(num_str, sizeof(num_str), "%d", num);
  String16 serviceName = String16("binderWorker") + String16(num_str);
  return serviceName;
}

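// Fill the request parcel: the first two int32 words carry the caller's
// priority and CPU; the remainder is zero padding up to roughly sz bytes.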
static void parcel_fill(Parcel& data, int sz, int priority, int cpu) {
  ASSERT(sz >= (int)sizeof(uint32_t) * 2);
  data.writeInt32(priority);
  data.writeInt32(cpu);
  sz -= sizeof(uint32_t);
  while (sz > (int)sizeof(uint32_t)) {
    data.writeInt32(0);
    sz -= sizeof(uint32_t);
  }
}

typedef struct {
  void* result;
  int target;
} thread_priv_t;

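// Body of the SCHED_FIFO caller thread: performs a single BINDER_NOP
// transaction against workers[target] and records its latency in the Results
// object passed in via thread_priv_t.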
static void* thread_start(void* p) {
  thread_priv_t* priv = (thread_priv_t*)p;
  int target = priv->target;
  Results* results_fifo = (Results*)priv->result;
  Parcel data, reply;
  Tick sta, end;

  parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
  thread_dump("fifo-caller");

  sta = tickNow();
  status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
  ASSERT(ret == NO_ERROR);
  end = tickNow();
  results_fifo->add_time(tickNano(sta, end));

  no_inherent += reply.readInt32();
  no_sync += reply.readInt32();
  return nullptr;
}

// Create a SCHED_FIFO thread to perform one transaction and wait for it to
// finish.
static void thread_transaction(int target, Results* results_fifo) {
  thread_priv_t thread_priv;
  void* dummy;
  pthread_t thread;
  pthread_attr_t attr;
  struct sched_param param;
  thread_priv.target = target;
  thread_priv.result = results_fifo;
  ASSERT(!pthread_attr_init(&attr));
  ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  param.sched_priority = sched_get_priority_max(SCHED_FIFO);
  ASSERT(!pthread_attr_setschedparam(&attr, &param));
  ASSERT(!pthread_create(&thread, &attr, &thread_start, &thread_priv));
  ASSERT(!pthread_join(thread, &dummy));
}

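// Process numbers [0, no_process/2) act as servers; the upper half act as clients.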
#define is_client(_num) ((_num) >= (no_process / 2))

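// Per-process worker. Every process registers its own BinderWorkerService and
// looks up the server-half services; client processes then drive 'iterations'
// rounds of two transactions each (one from a dedicated SCHED_FIFO thread, one
// from the process's main thread) and dump their results.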
void worker_fx(int num, int no_process, int iterations, int payload_size,
               Pipe p) {
  int dummy;
  Results results_other(false), results_fifo(trace);

  // Create and register the BinderWorkerService.
  ProcessState::self()->startThreadPool();
  sp<IServiceManager> serviceMgr = defaultServiceManager();
  sp<BinderWorkerService> service = new BinderWorkerService;
  serviceMgr->addService(generateServiceName(num), service);
  // init done
  p.signal();
  // wait for kick-off
  p.wait();

  // With client/server pairs, half of the workers are servers and half are
  // clients.
  int server_count = no_process / 2;

  for (int i = 0; i < server_count; i++) {
    // self service is in-process so just skip
    if (num == i) continue;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
    workers.push_back(serviceMgr->getService(generateServiceName(i)));
#pragma clang diagnostic pop
  }

  // The client of each pair iterates here; each iteration contains exactly
  // 2 transactions.
  for (int i = 0; is_client(num) && i < iterations; i++) {
    Parcel data, reply;
    Tick sta, end;
    // the target is paired to make it easier to diagnose
    int target = num % server_count;

    // 1. transaction by fifo thread
    thread_transaction(target, &results_fifo);
    parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
    thread_dump("other-caller");

    // 2. transaction by other thread
    sta = tickNow();
    ASSERT(NO_ERROR == workers[target]->transact(BINDER_NOP, data, &reply));
    end = tickNow();
    results_other.add_time(tickNano(sta, end));

    no_inherent += reply.readInt32();
    no_sync += reply.readInt32();
  }
  // Signal completion to master and wait.
  p.signal();
  p.wait();

  p.send(&dummy);
  // wait for kill
  p.wait();
  // The client of each pair dumps its results here.
  if (is_client(num)) {
    int no_trans = iterations * 2;
    double sync_ratio = (1.0 - (double)no_sync / no_trans);
    // TODO: libjson?
    cout << "\"P" << (num - server_count) << "\":{\"SYNC\":\""
         << ((sync_ratio > GOOD_SYNC_MIN) ? "GOOD" : "POOR") << "\","
         << "\"S\":" << (no_trans - no_sync) << ",\"I\":" << no_trans << ","
         << "\"R\":" << sync_ratio << "," << endl;

    cout << "  \"other_ms\":";
    results_other.dump();
    cout << "," << endl;
    cout << "  \"fifo_ms\": ";
    results_fifo.dump();
    cout << endl;
    cout << "}," << endl;
  }
  exit(no_inherent);
}

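// Fork one worker process. The child runs worker_fx() and never returns; the
// parent keeps the other end of the pipe pair to drive the handshake.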
Pipe make_process(int num, int iterations, int no_process, int payload_size) {
  auto pipe_pair = Pipe::createPipePair();
  pid_t pid = fork();
  if (pid) {
    // parent
    return std::move(get<0>(pipe_pair));
  } else {
    // child
    thread_dump(is_client(num) ? "client" : "server");
    worker_fx(num, no_process, iterations, payload_size, std::move(get<1>(pipe_pair)));
    // never get here
    return std::move(get<0>(pipe_pair));
  }
}

void wait_all(vector<Pipe>& v) {
  for (size_t i = 0; i < v.size(); i++) {
    v[i].wait();
  }
}

void signal_all(vector<Pipe>& v) {
  for (size_t i = 0; i < v.size(); i++) {
    v[i].signal();
  }
}

// This test is modified from binderThroughputTest.cpp
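// Typical invocation (flags as parsed below), e.g.:
//   ./schd-dbg -pair 3 -i 100 -deadline_us 2500
// The output is JSON-like: a "cfg" object, one "P<n>" object per client with
// its sync ratio and "other_ms"/"fifo_ms" latency summaries, and a final
// "inheritance" PASS/FAIL verdict.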
int main(int argc, char** argv) {
  for (int i = 1; i < argc; i++) {
    if (string(argv[i]) == "-i") {
      iterations = atoi(argv[i + 1]);
      i++;
      continue;
    }
    if (string(argv[i]) == "-pair") {
      no_process = 2 * atoi(argv[i + 1]);
      i++;
      continue;
    }
    if (string(argv[i]) == "-deadline_us") {
      deadline_us = atoi(argv[i + 1]);
      i++;
      continue;
    }
    if (string(argv[i]) == "-v") {
      verbose = 1;
    }
    // The -trace option is used like this:
    //
    // First start tracing with the atrace command as usual:
    // >atrace --async_start sched freq
    //
    // then run schd-dbg with the -trace option:
    //./schd-dbg -trace -deadline_us 2500
    //
    // This makes schd-dbg stop the trace (by writing '0' to
    // /sys/kernel/debug/tracing/tracing_on) and halt the process as soon as
    // it detects a transaction that exceeds the deadline. The trace log is
    // then available at /sys/kernel/debug/tracing/trace.
    if (string(argv[i]) == "-trace") {
      trace = 1;
    }
  }
  if (trace && !traceIsOn()) {
    cout << "trace is not running" << endl;
    cout << "check " << trace_path + "/tracing_on" << endl;
    cout << "use atrace --async_start first" << endl;
    exit(-1);
  }
  vector<Pipe> pipes;
  thread_dump("main");
  // TODO: libjson?
  cout << "{" << endl;
  cout << "\"cfg\":{\"pair\":" << (no_process / 2)
       << ",\"iterations\":" << iterations << ",\"deadline_us\":" << deadline_us
       << "}," << endl;

  // The main process forks 2 processes for each pair (1 server + 1 client),
  // each with a pipe to communicate with.
  for (int i = 0; i < no_process; i++) {
    pipes.push_back(make_process(i, iterations, no_process, payload_size));
  }
  // wait for init done
  wait_all(pipes);
  // kick-off iterations
  signal_all(pipes);
  // wait for completion
  wait_all(pipes);
  // start to send result
  signal_all(pipes);
  for (int i = 0; i < no_process; i++) {
    int status;
    // kill
    pipes[i].signal();
    wait(&status);
    // The exit status is the number of transactions without priority
    // inheritance detected in the child process.
    no_inherent += status;
  }
  // TODO: libjson?
  cout << "\"inheritance\": " << (no_inherent == 0 ? "\"PASS\"" : "\"FAIL\"")
       << endl;
  cout << "}" << endl;
  return -no_inherent;
}