/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "NBLog"
//#define LOG_NDEBUG 0

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <new>
#include <cutils/atomic.h>
#include <media/nbaio/NBLog.h>
#include <utils/Log.h>
#include <utils/String8.h>

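// NBLog is a lightweight, non-blocking event log that lives in a shared-memory
// circular buffer: a Writer (producer) serializes entries and a Reader
// (consumer) formats them into a human-readable dump.  A minimal usage sketch
// follows; how the shared region is allocated is up to the caller, and the
// MemoryDealer, variable names, and thread assignments below are illustrative
// assumptions, not APIs defined in this file:
//
//     size_t payload = 1024;                          // capacity; rounded up internally
//     sp<IMemory> mem = dealer->allocate(NBLog::Timeline::sharedSize(payload));
//     new ((void *) mem->pointer()) NBLog::Shared;    // construct the control block once
//     NBLog::Writer writer(payload, mem);             // on the producing thread
//     NBLog::Reader reader(payload, mem);             // on the dumping thread
//     writer.logf("cycle %d took %d us", cycle, us);
//     reader.dump(fd, 0 /*indent*/);
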
namespace android {

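// Each entry is serialized into the shared circular buffer as:
//     offset 0             : mEvent  (one byte)
//     offset 1             : mLength (one byte, 0..255)
//     offsets 2..mLength+1 : the payload bytes
//     offset mLength+2     : mLength again, so the reader can also walk backwards
// readAt() exposes that serialized form one byte at a time.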
int NBLog::Entry::readAt(size_t offset) const
{
    // FIXME This is too slow; despite the name, it is used during writing
    if (offset == 0)
        return mEvent;
    else if (offset == 1)
        return mLength;
    else if (offset < (size_t) (mLength + 2))
        return ((char *) mData)[offset - 2];
    else if (offset == (size_t) (mLength + 2))
        return mLength;
    else
        return 0;
}

// ---------------------------------------------------------------------------

#if 0   // FIXME see note in NBLog.h
NBLog::Timeline::Timeline(size_t size, void *shared)
    : mSize(roundup(size)), mOwn(shared == NULL),
      mShared((Shared *) (mOwn ? new char[sharedSize(size)] : shared))
{
    new (mShared) Shared;
}

NBLog::Timeline::~Timeline()
{
    mShared->~Shared();
    if (mOwn) {
        delete[] (char *) mShared;
    }
}
#endif

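// Returns the number of bytes of shared memory needed for a log with the
// requested payload capacity: the Shared control block plus the buffer,
// rounded up (the mask arithmetic below requires a power-of-two buffer size).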
/*static*/
size_t NBLog::Timeline::sharedSize(size_t size)
{
    return sizeof(Shared) + roundup(size);
}

// ---------------------------------------------------------------------------

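// Writer is the producer side.  It serializes entries into the shared circular
// buffer and publishes the new rear index with a release store, so a concurrent
// Reader::dump() only observes fully written bytes.  A Writer constructed
// without a valid shared region is permanently disabled and all log calls are
// no-ops.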
NBLog::Writer::Writer()
    : mSize(0), mShared(NULL), mRear(0), mEnabled(false)
{
}

NBLog::Writer::Writer(size_t size, void *shared)
    : mSize(roundup(size)), mShared((Shared *) shared), mRear(0), mEnabled(mShared != NULL)
{
}

NBLog::Writer::Writer(size_t size, const sp<IMemory>& iMemory)
    : mSize(roundup(size)), mShared(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL),
      mIMemory(iMemory), mRear(0), mEnabled(mShared != NULL)
{
}

void NBLog::Writer::log(const char *string)
{
    if (!mEnabled) {
        return;
    }
    size_t length = strlen(string);
    if (length > 255) {
        length = 255;
    }
    log(EVENT_STRING, string, length);
}

void NBLog::Writer::logf(const char *fmt, ...)
{
    if (!mEnabled) {
        return;
    }
    va_list ap;
    va_start(ap, fmt);
    Writer::logvf(fmt, ap);     // the Writer:: is needed to avoid virtual dispatch for LockedWriter
    va_end(ap);
}

void NBLog::Writer::logvf(const char *fmt, va_list ap)
{
    if (!mEnabled) {
        return;
    }
    char buffer[256];
    int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
    if (length >= (int) sizeof(buffer)) {
        length = sizeof(buffer) - 1;
        // NUL termination is not required
        // buffer[length] = '\0';
    }
    if (length >= 0) {
        log(EVENT_STRING, buffer, length);
    }
}

void NBLog::Writer::logTimestamp()
{
    if (!mEnabled) {
        return;
    }
    struct timespec ts;
    if (!clock_gettime(CLOCK_MONOTONIC, &ts)) {
        log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec));
    }
}

void NBLog::Writer::logTimestamp(const struct timespec& ts)
{
    if (!mEnabled) {
        return;
    }
    log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec));
}

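// Validates an untrusted event before serializing it: NULL data, oversize
// payloads, and unknown event types are silently dropped.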
void NBLog::Writer::log(Event event, const void *data, size_t length)
{
    if (!mEnabled) {
        return;
    }
    if (data == NULL || length > 255) {
        return;
    }
    switch (event) {
    case EVENT_STRING:
    case EVENT_TIMESTAMP:
        break;
    case EVENT_RESERVED:
    default:
        return;
    }
    Entry entry(event, data, length);
    log(&entry, true /*trusted*/);
}

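// Copies one entry into the circular buffer, splitting it across the wrap
// point if necessary, then advances mRear.  The release store pairs with the
// acquire load in Reader::dump() so the reader never sees a partially
// published rear index.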
void NBLog::Writer::log(const NBLog::Entry *entry, bool trusted)
{
    if (!mEnabled) {
        return;
    }
    if (!trusted) {
        log(entry->mEvent, entry->mData, entry->mLength);
        return;
    }
    size_t rear = mRear & (mSize - 1);
    size_t written = mSize - rear;      // initially the contiguous bytes before the wrap point;
                                        // after the copy below, the number of bytes actually written
    size_t need = entry->mLength + 3;   // mEvent, mLength, data[length], mLength
                                        // need = number of bytes remaining to write
    if (written > need) {
        written = need;
    }
    size_t i;
    // FIXME optimize this using memcpy for the data part of the Entry.
    // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
    for (i = 0; i < written; ++i) {
        mShared->mBuffer[rear + i] = entry->readAt(i);
    }
    if (rear + written == mSize && (need -= written) > 0) {
        for (i = 0; i < need; ++i) {
            mShared->mBuffer[i] = entry->readAt(written + i);
        }
        written += need;
    }
    android_atomic_release_store(mRear += written, &mShared->mRear);
}

bool NBLog::Writer::isEnabled() const
{
    return mEnabled;
}

bool NBLog::Writer::setEnabled(bool enabled)
{
    bool old = mEnabled;
    mEnabled = enabled && mShared != NULL;
    return old;
}

// ---------------------------------------------------------------------------

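// LockedWriter wraps each Writer operation in a mutex so that several threads
// can safely share one log; the plain Writer above takes no lock and is
// intended to be called from a single thread.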
NBLog::LockedWriter::LockedWriter()
    : Writer()
{
}

NBLog::LockedWriter::LockedWriter(size_t size, void *shared)
    : Writer(size, shared)
{
}

void NBLog::LockedWriter::log(const char *string)
{
    Mutex::Autolock _l(mLock);
    Writer::log(string);
}

void NBLog::LockedWriter::logf(const char *fmt, ...)
{
    // FIXME should not take the lock until after formatting is done
    Mutex::Autolock _l(mLock);
    va_list ap;
    va_start(ap, fmt);
    Writer::logvf(fmt, ap);
    va_end(ap);
}

void NBLog::LockedWriter::logvf(const char *fmt, va_list ap)
{
    // FIXME should not take the lock until after formatting is done
    Mutex::Autolock _l(mLock);
    Writer::logvf(fmt, ap);
}

void NBLog::LockedWriter::logTimestamp()
{
    // FIXME should not take the lock until after the clock_gettime() syscall
    Mutex::Autolock _l(mLock);
    Writer::logTimestamp();
}

void NBLog::LockedWriter::logTimestamp(const struct timespec& ts)
{
    Mutex::Autolock _l(mLock);
    Writer::logTimestamp(ts);
}

bool NBLog::LockedWriter::isEnabled() const
{
    Mutex::Autolock _l(mLock);
    return Writer::isEnabled();
}

bool NBLog::LockedWriter::setEnabled(bool enabled)
{
    Mutex::Autolock _l(mLock);
    return Writer::setEnabled(enabled);
}

// ---------------------------------------------------------------------------

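// Reader is the consumer side.  It keeps its own front index (mFront) and
// never writes to the shared region, so it can run concurrently with a Writer;
// entries overwritten before they are read are reported as lost.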
NBLog::Reader::Reader(size_t size, const void *shared)
    : mSize(roundup(size)), mShared((const Shared *) shared), mFront(0)
{
}

NBLog::Reader::Reader(size_t size, const sp<IMemory>& iMemory)
    : mSize(roundup(size)), mShared(iMemory != 0 ? (const Shared *) iMemory->pointer() : NULL),
      mIMemory(iMemory), mFront(0)
{
}

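// dump() proceeds in three passes:
//   1. snapshot the writer's rear index (acquire load) and copy the available
//      bytes out of the circular buffer, accounting for any overwritten (lost)
//      bytes;
//   2. scan the copy backwards, using the trailing length byte of each entry,
//      to find the oldest entry boundary that is still intact and to size the
//      timestamp column;
//   3. walk forwards from that boundary, formatting each entry and squashing
//      long runs of consecutive timestamps into a single summary line.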
void NBLog::Reader::dump(int fd, size_t indent)
{
    int32_t rear = android_atomic_acquire_load(&mShared->mRear);
    size_t avail = rear - mFront;
    if (avail == 0) {
        return;
    }
    size_t lost = 0;
    if (avail > mSize) {
        lost = avail - mSize;
        mFront += lost;
        avail = mSize;
    }
    size_t remaining = avail;       // remaining = number of bytes left to read
    size_t front = mFront & (mSize - 1);
    size_t read = mSize - front;    // initially the contiguous bytes before the wrap point;
                                    // after the copy below, the number of bytes actually read
    if (read > remaining) {
        read = remaining;
    }
    // make a copy to avoid a race condition with the writer
    uint8_t *copy = new uint8_t[avail];
    // copy first part of circular buffer up until the wraparound point
    memcpy(copy, &mShared->mBuffer[front], read);
    if (front + read == mSize) {
        if ((remaining -= read) > 0) {
            // copy second part of circular buffer starting at beginning
            memcpy(&copy[read], mShared->mBuffer, remaining);
            read += remaining;
            // remaining is now 0, but the value is not used again
        }
    }
    mFront += read;
    size_t i = avail;
    Event event;
    size_t length;
    struct timespec ts;
    time_t maxSec = -1;
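    // Walk backwards from the newest data using each entry's trailing length
    // byte; stop at the first entry that fails the length cross-check, since
    // everything older than that point is unreadable.  Also track the largest
    // timestamp seconds value to size the timestamp column.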
    while (i >= 3) {
        length = copy[i - 1];
        if (length + 3 > i || copy[i - length - 2] != length) {
            break;
        }
        event = (Event) copy[i - length - 3];
        if (event == EVENT_TIMESTAMP) {
            if (length != sizeof(struct timespec)) {
                // corrupt
                break;
            }
            memcpy(&ts, &copy[i - length - 1], sizeof(struct timespec));
            if (ts.tv_sec > maxSec) {
                maxSec = ts.tv_sec;
            }
        }
        i -= length + 3;
    }
    mFd = fd;
    mIndent = indent;
    String8 timestamp, body;
    lost += i;
    if (lost > 0) {
        body.appendFormat("warning: lost %zu bytes worth of events", lost);
        // TODO The timestamp column is empty here; the only other choice is to wait for the
        //      first timestamp event in the log to push this line out.  Consider keeping the
        //      timestamp/body between calls to readAt().
        dumpLine(timestamp, body);
    }
    size_t width = 1;
    while (maxSec >= 10) {
        ++width;
        maxSec /= 10;
    }
    if (maxSec >= 0) {
        timestamp.appendFormat("[%*s]", (int) width + 4, "");
    }
    bool deferredTimestamp = false;
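    // Format entries from oldest to newest.  Consecutive timestamps within the
    // same second are looked ahead and, if there are at least kSquashTimestamp
    // of them, collapsed into one "[start to end by min to max]" summary.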
    while (i < avail) {
        event = (Event) copy[i];
        length = copy[i + 1];
        const void *data = &copy[i + 2];
        size_t advance = length + 3;
        switch (event) {
        case EVENT_STRING:
            body.appendFormat("%.*s", (int) length, (const char *) data);
            break;
        case EVENT_TIMESTAMP: {
            // already checked that length == sizeof(struct timespec);
            memcpy(&ts, data, sizeof(struct timespec));
            long prevNsec = ts.tv_nsec;
            long deltaMin = LONG_MAX;
            long deltaMax = -1;
            long deltaTotal = 0;
            size_t j = i;
            for (;;) {
                j += sizeof(struct timespec) + 3;
                if (j >= avail || (Event) copy[j] != EVENT_TIMESTAMP) {
                    break;
                }
                struct timespec tsNext;
                memcpy(&tsNext, &copy[j + 2], sizeof(struct timespec));
                if (tsNext.tv_sec != ts.tv_sec) {
                    break;
                }
                long delta = tsNext.tv_nsec - prevNsec;
                if (delta < 0) {
                    break;
                }
                if (delta < deltaMin) {
                    deltaMin = delta;
                }
                if (delta > deltaMax) {
                    deltaMax = delta;
                }
                deltaTotal += delta;
                prevNsec = tsNext.tv_nsec;
            }
            size_t n = (j - i) / (sizeof(struct timespec) + 3);
            if (deferredTimestamp) {
                dumpLine(timestamp, body);
                deferredTimestamp = false;
            }
            timestamp.clear();
            if (n >= kSquashTimestamp) {
                timestamp.appendFormat("[%d.%03d to .%.03d by .%.03d to .%.03d]",
                        (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000),
                        (int) ((ts.tv_nsec + deltaTotal) / 1000000),
                        (int) (deltaMin / 1000000), (int) (deltaMax / 1000000));
                i = j;
                advance = 0;
                break;
            }
            timestamp.appendFormat("[%d.%03d]", (int) ts.tv_sec,
                    (int) (ts.tv_nsec / 1000000));
            deferredTimestamp = true;
            } break;
        case EVENT_RESERVED:
        default:
            body.appendFormat("warning: unknown event %d", event);
            break;
        }
        i += advance;

        if (!body.isEmpty()) {
            dumpLine(timestamp, body);
            deferredTimestamp = false;
        }
    }
    if (deferredTimestamp) {
        dumpLine(timestamp, body);
    }
    // FIXME it would be more efficient to put a char mCopy[256] as a member variable of the dumper
    delete[] copy;
}

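// Writes one formatted line to the dump file descriptor if one was supplied,
// otherwise to the system log, then clears the body for reuse.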
void NBLog::Reader::dumpLine(const String8& timestamp, String8& body)
{
    if (mFd >= 0) {
        dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
    } else {
        ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
    }
    body.clear();
}

bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
{
    return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
}

}   // namespace android