blob: 27320712f0f8e935bb8fc9839a4e73a534ae2701 [file] [log] [blame]
#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>

#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <chrono>
#include <iomanip>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>
18
19using namespace std;
20using namespace android;
21
// Transaction codes understood by BinderWorkerService::onTransact().
enum BinderWorkerServiceCode {
  BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
};
25
// ASSERT(cond): if cond is false, print the enclosing function, line number
// and the failed condition to stderr, then terminate the process.  Used
// instead of assert(3) so the check also fires in non-debug builds.
#define ASSERT(cond) \
  do { \
    if (!(cond)) { \
      cerr << __func__ << ":" << __LINE__ << " condition:" << #cond \
           << " failed\n" \
           << endl; \
      exit(EXIT_FAILURE); \
    } \
  } while (0)
35
// Proxies to the worker services registered by the server half of each
// pair; filled in per-process by worker_fx().
vector<sp<IBinder> > workers;

// the ratio that the service is synced on the same cpu beyond
// GOOD_SYNC_MIN is considered as good
#define GOOD_SYNC_MIN (0.6)

// Digits of precision when dumping latency numbers.
// NOTE(review): name is a historical typo for "PRECISION"; kept as-is
// because Results::dump() below references it.
#define DUMP_PRICISION 3

// the default value
int no_process = 2;    // total forked worker processes (2 per pair)
int iterations = 100;  // loops per client; each loop does 2 transactions
int payload_size = 16; // parcel payload in bytes (must be >= 8, see parcel_fill)
int no_inherent = 0;   // count of transactions where priority was not inherited
int no_sync = 0;       // count of transactions that landed on a different cpu
int verbose = 0;       // enables thread_dump() tracing

// the deadline latency that we are interested in
uint64_t deadline_us = 2500;
54
55int thread_pri() {
56 struct sched_param param;
57 int policy;
58 ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
59 return param.sched_priority;
60}
61
62void thread_dump(const char* prefix) {
63 struct sched_param param;
64 int policy;
65 if (!verbose) return;
66 cout << "--------------------------------------------------" << endl;
67 cout << setw(12) << left << prefix << " pid: " << getpid()
68 << " tid: " << gettid() << " cpu: " << sched_getcpu() << endl;
69 ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
70 string s = (policy == SCHED_OTHER)
71 ? "SCHED_OTHER"
72 : (policy == SCHED_FIFO)
73 ? "SCHED_FIFO"
74 : (policy == SCHED_RR) ? "SCHED_RR" : "???";
75 cout << setw(12) << left << s << param.sched_priority << endl;
76 return;
77}
78
79class BinderWorkerService : public BBinder {
80 public:
81 BinderWorkerService() {
82 }
83 ~BinderWorkerService() {
84 }
85 virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
86 uint32_t flags = 0) {
87 (void)flags;
88 (void)data;
89 (void)reply;
90 switch (code) {
91 // The transaction format is like
92 //
93 // data[in]: int32: caller priority
94 // int32: caller cpu
95 //
96 // reply[out]: int32: 1 if caller's priority != callee's priority
97 // int32: 1 if caller's cpu != callee's cpu
98 //
99 // note the caller cpu read here is not always correct
100 // there're still chances that the caller got switched out
101 // right after it read the cpu number and still before the transaction.
102 case BINDER_NOP: {
103 thread_dump("binder");
104 int priority = thread_pri();
105 int priority_caller = data.readInt32();
106 int h = 0, s = 0;
107 if (priority_caller != priority) {
108 h++;
109 if (verbose) {
110 cout << "err priority_caller:" << priority_caller
111 << ", priority:" << priority << endl;
112 }
113 }
114 if (priority == sched_get_priority_max(SCHED_FIFO)) {
115 int cpu = sched_getcpu();
116 int cpu_caller = data.readInt32();
117 if (cpu != cpu_caller) {
118 s++;
119 }
120 }
121 reply->writeInt32(h);
122 reply->writeInt32(s);
123 return NO_ERROR;
124 }
125 default:
126 return UNKNOWN_TRANSACTION;
127 };
128 }
129};
130
131class Pipe {
132 int m_readFd;
133 int m_writeFd;
134 Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {
135 }
136 Pipe(const Pipe&) = delete;
137 Pipe& operator=(const Pipe&) = delete;
138 Pipe& operator=(const Pipe&&) = delete;
139
140 public:
141 Pipe(Pipe&& rval) noexcept {
142 m_readFd = rval.m_readFd;
143 m_writeFd = rval.m_writeFd;
144 rval.m_readFd = 0;
145 rval.m_writeFd = 0;
146 }
147 ~Pipe() {
148 if (m_readFd) close(m_readFd);
149 if (m_writeFd) close(m_writeFd);
150 }
151 void signal() {
152 bool val = true;
153 int error = write(m_writeFd, &val, sizeof(val));
154 ASSERT(error >= 0);
155 };
156 void wait() {
157 bool val = false;
158 int error = read(m_readFd, &val, sizeof(val));
159 ASSERT(error >= 0);
160 }
161 template <typename T>
162 void send(const T& v) {
163 int error = write(m_writeFd, &v, sizeof(T));
164 ASSERT(error >= 0);
165 }
166 template <typename T>
167 void recv(T& v) {
168 int error = read(m_readFd, &v, sizeof(T));
169 ASSERT(error >= 0);
170 }
171 static tuple<Pipe, Pipe> createPipePair() {
172 int a[2];
173 int b[2];
174
175 int error1 = pipe(a);
176 int error2 = pipe(b);
177 ASSERT(error1 >= 0);
178 ASSERT(error2 >= 0);
179
180 return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
181 }
182};
183
// Timestamp type used for latency measurement.
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Tick;

// Current high-resolution timestamp.
static inline Tick tickNow() {
  return std::chrono::high_resolution_clock::now();
}

// Nanoseconds elapsed from sta to end.
// FIX: parameters are now const references — the original took mutable
// references for no reason, which rejected temporaries and const Ticks.
static inline uint64_t tickNano(const Tick& sta, const Tick& end) {
  return uint64_t(
      std::chrono::duration_cast<std::chrono::nanoseconds>(end - sta).count());
}
193
194struct Results {
195 uint64_t m_best = 0xffffffffffffffffULL;
196 uint64_t m_worst = 0;
197 uint64_t m_transactions = 0;
198 uint64_t m_total_time = 0;
199 uint64_t m_miss = 0;
200
201 void add_time(uint64_t nano) {
202 m_best = min(nano, m_best);
203 m_worst = max(nano, m_worst);
204 m_transactions += 1;
205 m_total_time += nano;
206 if (nano > deadline_us * 1000) m_miss++;
207 }
208 void dump() {
209 double best = (double)m_best / 1.0E6;
210 double worst = (double)m_worst / 1.0E6;
211 double average = (double)m_total_time / m_transactions / 1.0E6;
212 // FIXME: libjson?
213 cout << std::setprecision(DUMP_PRICISION) << "{ \"avg\":" << setw(5) << left
214 << average << ", \"wst\":" << setw(5) << left << worst
215 << ", \"bst\":" << setw(5) << left << best << ", \"miss\":" << m_miss
216 << "}";
217 }
218};
219
220String16 generateServiceName(int num) {
221 char num_str[32];
222 snprintf(num_str, sizeof(num_str), "%d", num);
223 String16 serviceName = String16("binderWorker") + String16(num_str);
224 return serviceName;
225}
226
227static void parcel_fill(Parcel& data, int sz, int priority, int cpu) {
228 ASSERT(sz >= (int)sizeof(uint32_t) * 2);
229 data.writeInt32(priority);
230 data.writeInt32(cpu);
231 sz -= sizeof(uint32_t);
232 while (sz > (int)sizeof(uint32_t)) {
233 data.writeInt32(0);
234 sz -= sizeof(uint32_t);
235 }
236}
237
// Entry point of the per-transaction SCHED_FIFO helper thread: issue one
// BINDER_NOP transaction to workers[0] (the first server proxy fetched by
// this process) and record the round-trip latency into the Results passed
// as 'p'.
static void* thread_start(void* p) {
  Results* results_fifo = (Results*)p;
  Parcel data, reply;
  Tick sta, end;

  // payload carries this thread's priority and cpu so the server side can
  // score priority inheritance and cpu sync
  parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
  thread_dump("fifo-caller");

  sta = tickNow();
  status_t ret = workers[0]->transact(BINDER_NOP, data, &reply);
  end = tickNow();
  results_fifo->add_time(tickNano(sta, end));

  // reply: 1/0 flags for "priority not inherited" and "cpu not synced".
  // Updating the globals without a lock is safe here: the spawning thread
  // joins this thread before it reads them again.
  no_inherent += reply.readInt32();
  no_sync += reply.readInt32();
  return 0;
}
255
256// create a fifo thread to transact and wait it to finished
257static void thread_transaction(Results* results_fifo) {
258 void* dummy;
259 pthread_t thread;
260 pthread_attr_t attr;
261 struct sched_param param;
262 ASSERT(!pthread_attr_init(&attr));
263 ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
264 param.sched_priority = sched_get_priority_max(SCHED_FIFO);
265 ASSERT(!pthread_attr_setschedparam(&attr, &param));
266 ASSERT(!pthread_create(&thread, &attr, &thread_start, results_fifo));
267 ASSERT(!pthread_join(thread, &dummy));
268}
269
// Worker numbering convention: the first no_process/2 workers are servers,
// the remaining half are clients.
#define is_client(_num) ((_num) >= (no_process / 2))
271
// Body of every forked worker process (both server and client halves).
// Rendezvous protocol with the parent over pipe 'p':
//   1. register our service, signal, wait      (all workers registered)
//   2. clients run their iterations, signal, wait
//   3. send a dummy value, wait for the final signal, dump results, exit
// The process exit status is the number of transactions observed without
// priority inheritance; the parent accumulates it via wait().
void worker_fx(int num, int no_process, int iterations, int payload_size,
               Pipe p) {
  int dummy;
  Results results_other, results_fifo;

  // Create and publish our BinderWorkerService, then rendezvous with the
  // parent so nobody looks up a service before all are registered.
  ProcessState::self()->startThreadPool();
  sp<IServiceManager> serviceMgr = defaultServiceManager();
  sp<BinderWorkerService> service = new BinderWorkerService;
  serviceMgr->addService(generateServiceName(num), service);
  p.signal();
  p.wait();

  // If client/server pairs, then half the workers are
  // servers and half are clients
  int server_count = no_process / 2;

  // Fetch a proxy for every server; clients skip nothing, servers skip
  // themselves (self service is in-process).
  for (int i = 0; i < server_count; i++) {
    // self service is in-process so just skip
    if (num == i) continue;
    workers.push_back(serviceMgr->getService(generateServiceName(i)));
  }

  // Client for each pair iterates here;
  // each iteration contains exactly 2 transactions.
  for (int i = 0; is_client(num) && i < iterations; i++) {
    Parcel data, reply;
    Tick sta, end;
    // the target is paired to make it easier to diagnose
    int target = num % server_count;

    // 1. transaction by fifo thread
    thread_transaction(&results_fifo);
    parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
    thread_dump("other-caller");

    // 2. transaction by other thread
    sta = tickNow();
    ASSERT(NO_ERROR == workers[target]->transact(BINDER_NOP, data, &reply));
    end = tickNow();
    results_other.add_time(tickNano(sta, end));

    no_inherent += reply.readInt32();
    no_sync += reply.readInt32();
  }
  // Signal completion to master and wait.
  p.signal();
  p.wait();

  // NOTE(review): this sends the *address* of dummy, not its value — the
  // parent never recv()s it, so it only acts as extra sync traffic.
  p.send(&dummy);
  p.wait();
  // Client for each pair dump here
  if (is_client(num)) {
    int no_trans = iterations * 2;
    double sync_ratio = (1.0 - (double)no_sync / no_trans);
    // FIXME: libjson?
    cout << "\"P" << (num - server_count) << "\":{\"SYNC\":\""
         << ((sync_ratio > GOOD_SYNC_MIN) ? "GOOD" : "POOR") << "\","
         << "\"S\":" << (no_trans - no_sync) << ",\"I\":" << no_trans << ","
         << "\"R\":" << sync_ratio << "," << endl;

    cout << "  \"other_ms\":";
    results_other.dump();
    cout << "," << endl;
    cout << "  \"fifo_ms\": ";
    results_fifo.dump();
    cout << endl;
    cout << "}," << endl;
  }
  exit(no_inherent);
}
343
// Fork one worker process and return the parent's endpoint of the control
// pipe pair.  The child runs worker_fx() and never returns (it exits).
Pipe make_process(int num, int iterations, int no_process, int payload_size) {
  auto pipe_pair = Pipe::createPipePair();
  pid_t pid = fork();
  if (pid) {
    // parent keeps the first endpoint
    return move(get<0>(pipe_pair));
  } else {
    // child
    thread_dump(is_client(num) ? "client" : "server");
    worker_fx(num, no_process, iterations, payload_size,
              move(get<1>(pipe_pair)));
    // never get here: worker_fx() calls exit()
    return move(get<0>(pipe_pair));
  }
}
359
360void wait_all(vector<Pipe>& v) {
361 for (size_t i = 0; i < v.size(); i++) {
362 v[i].wait();
363 }
364}
365
366void signal_all(vector<Pipe>& v) {
367 for (size_t i = 0; i < v.size(); i++) {
368 v[i].signal();
369 }
370}
371
372// This test is modified from binderThroughputTest.cpp
373int main(int argc, char** argv) {
374 for (int i = 1; i < argc; i++) {
375 if (string(argv[i]) == "-i") {
376 iterations = atoi(argv[i + 1]);
377 i++;
378 continue;
379 }
380 if (string(argv[i]) == "-pair") {
381 no_process = 2 * atoi(argv[i + 1]);
382 i++;
383 continue;
384 }
385 if (string(argv[i]) == "-deadline_us") {
386 deadline_us = atoi(argv[i + 1]);
387 i++;
388 continue;
389 }
390 if (string(argv[i]) == "-v") {
391 verbose = 1;
392 i++;
393 }
394 }
395 vector<Pipe> pipes;
396 thread_dump("main");
397 // FIXME: libjson?
398 cout << "{" << endl;
399 cout << "\"cfg\":{\"pair\":" << (no_process / 2)
400 << ",\"iterations\":" << iterations << ",\"deadline_us\":" << deadline_us
401 << "}," << endl;
402
403 // the main process fork 2 processes for each pairs
404 // 1 server + 1 client
405 // each has a pipe to communicate with
406 for (int i = 0; i < no_process; i++) {
407 pipes.push_back(make_process(i, iterations, no_process, payload_size));
408 }
409 wait_all(pipes);
410 signal_all(pipes);
411 wait_all(pipes);
412 signal_all(pipes);
413 for (int i = 0; i < no_process; i++) {
414 int status;
415 pipes[i].signal();
416 wait(&status);
417 // the exit status is number of transactions without priority inheritance
418 // detected in the child process
419 no_inherent += status;
420 }
421 // FIXME: libjson?
422 cout << "\"inheritance\": " << (no_inherent == 0 ? "\"PASS\"" : "\"FAIL\"")
423 << endl;
424 cout << "}" << endl;
425 return -no_inherent;
426}