| author | Søren Sandmann <sandmann@daimi.au.dk> | 2010-03-09 11:59:00 -0500 |
|---|---|---|
| committer | Søren Sandmann <sandmann@daimi.au.dk> | 2010-03-09 12:27:43 -0500 |
| commit | 1707bebe93fbf122b0b79b7110236e389de1788e (patch) | |
| tree | 67901e5429e7e239439550d5e612430d8ff41cc1 | |
| parent | 001d3b8e42126ad34416ca811d4c920581a84d28 (diff) | |
Stop using double mmap trick.
It has issues on various architectures, such as ARM, and there was
never any guarantee that it would actually work. Since we would need
fallback code anyway, and the main point of the trick was to simplify the
code, just get rid of it and handle the overflow conditions manually.
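
For readers unfamiliar with the wrap-around handling the message refers to: with a power-of-two ring buffer, a record whose payload crosses the end of the buffer has to be copied out in two pieces before it can be parsed. The sketch below is not sysprof's code; the names `read_wrapped`, `ring` and `out` are hypothetical, and it only illustrates the general technique the patch adopts.

```c
/* Minimal illustration (hypothetical names, error handling omitted):
 * copying a record out of a power-of-two ring buffer when it may
 * straddle the end of the buffer.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

void
read_wrapped (const uint8_t *ring,        /* start of the ring buffer      */
              size_t         size,        /* buffer size, a power of two   */
              uint64_t       tail,        /* free-running read position    */
              size_t         record_size,
              uint8_t       *out)         /* bounce buffer, >= record_size */
{
    size_t mask  = size - 1;
    size_t start = tail & mask;

    if (start + record_size <= size)
    {
        /* The record is contiguous in the buffer. */
        memcpy (out, ring + start, record_size);
    }
    else
    {
        /* The record wraps: copy the piece up to the end of the buffer,
         * then the remainder from the beginning.
         */
        size_t n_before = size - start;
        size_t n_after  = record_size - n_before;

        memcpy (out, ring + start, n_before);
        memcpy (out + n_before, ring, n_after);
    }
}
```

The actual patch only performs the copy when a record really wraps, and falls back to g_malloc() when the record is larger than its 4096-byte stack buffer.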
-rw-r--r-- | collector.c | 80 |
1 file changed, 46 insertions(+), 34 deletions(-)
```diff
diff --git a/collector.c b/collector.c
index 5f50ce0..d034405 100644
--- a/collector.c
+++ b/collector.c
@@ -246,17 +246,13 @@ on_read (gpointer data)
 {
     counter_t *counter = data;
     int mask = (N_PAGES * get_page_size() - 1);
+    int n_bytes = mask + 1;
     gboolean skip_samples;
     Collector *collector;
     uint64_t head, tail;
 
     collector = counter->collector;
 
-#if 0
-    int n_bytes = mask + 1;
-    int x;
-#endif
-
     tail = counter->tail;
 
     head = counter->mmap_page->data_head;
@@ -283,7 +279,22 @@ on_read (gpointer data)
 
     while (head - tail >= sizeof (struct perf_event_header))
     {
-        struct perf_event_header *header = (void *)(counter->data + (tail & mask));
+        struct perf_event_header *header;
+        guint8 buffer[4096];
+        guint8 *free_me;
+
+        free_me = NULL;
+
+        /* Note that:
+         *
+         * - perf events are a multiple of 64 bits
+         * - the perf event header is 64 bits
+         * - the data area is a multiple of 64 bits
+         *
+         * which means there will always be space for one header, which means we
+         * can safely dereference the size field.
+         */
+        header = (struct perf_event_header *)(counter->data + (tail & mask));
 
         if (header->size > head - tail)
         {
@@ -294,14 +305,36 @@ on_read (gpointer data)
             break;
         }
 
-        if (!skip_samples || header->type != PERF_EVENT_SAMPLE)
+        if (counter->data + (tail & mask) + header->size > counter->data + n_bytes)
         {
+            int n_before, n_after;
+            guint8 *b;
+
+            if (header->size > sizeof (buffer))
+                free_me = b = g_malloc (header->size);
+            else
+                b = buffer;
+
+            n_after = (tail & mask) + header->size - n_bytes;
+            n_before = header->size - n_after;
+
+            memcpy (b, counter->data + (tail & mask), n_before);
+            memcpy (b + n_before, counter->data, n_after);
+
+            header = (struct perf_event_header *)b;
+        }
+
+        if (!skip_samples || header->type != PERF_EVENT_SAMPLE)
+        {
             if (header->type == PERF_EVENT_SAMPLE)
-                collector->n_samples++;
+                collector->n_samples++;
 
             process_event (collector, counter, (counter_event_t *)header);
-        }
-
+        }
+
+        if (free_me)
+            g_free (free_me);
+
         tail += header->size;
     }
 
@@ -334,33 +367,12 @@ static void *
 map_buffer (counter_t *counter, GError **err)
 {
     int n_bytes = N_PAGES * get_page_size();
-    void *address, *a;
+    void *address;
 
-    /* We map the ring buffer twice in consecutive address space,
-     * so that we don't need special-case code to deal with wrapping.
-     */
-    address = mmap (NULL, n_bytes * 2 + get_page_size(), PROT_NONE,
-                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    address = mmap (NULL, n_bytes + get_page_size(), PROT_READ | PROT_WRITE, MAP_SHARED, counter->fd, 0);
 
     if (address == MAP_FAILED)
-        return fail (err, "mmap");
-
-    a = mmap (address + n_bytes, n_bytes + get_page_size(),
-              PROT_READ | PROT_WRITE,
-              MAP_SHARED | MAP_FIXED, counter->fd, 0);
-
-    if (a != address + n_bytes)
-        return fail (err, "mmap");
-
-    a = mmap (address, n_bytes + get_page_size(),
-              PROT_READ | PROT_WRITE,
-              MAP_SHARED | MAP_FIXED, counter->fd, 0);
-
-    if (a == MAP_FAILED || a != address)
-        return fail (err, "mmap");
-
-    if (a != address)
-        return fail (err, "mmap");
+        return fail (err, "mmap");
 
     return address;
 }
```
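
For context, the removed trick reserved twice the buffer size of address space and mapped the same pages into both halves, so a read running off the end of the first copy simply lands in the second. The sketch below is only a rough reconstruction of that idea, not the deleted sysprof code: it substitutes a memfd for the perf event fd so it can run standalone, and it omits error checking.

```c
/* Rough sketch of the removed "double mmap" idea (not the deleted sysprof
 * code).  A memfd stands in for the perf event fd; error checking omitted.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
    size_t n_bytes = 4 * getpagesize ();

    int fd = memfd_create ("ring", 0);
    ftruncate (fd, n_bytes);

    /* Reserve 2 * n_bytes of address space, then map the same pages into
     * both halves with MAP_FIXED.  This is the step that was never
     * guaranteed to work everywhere.
     */
    char *address = mmap (NULL, n_bytes * 2, PROT_NONE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    mmap (address, n_bytes, PROT_READ | PROT_WRITE,
          MAP_SHARED | MAP_FIXED, fd, 0);
    mmap (address + n_bytes, n_bytes, PROT_READ | PROT_WRITE,
          MAP_SHARED | MAP_FIXED, fd, 0);

    /* Data written at the end and at the start of the buffer can be read
     * in one straight pass across the seam -- no wrap handling needed.
     */
    memcpy (address + n_bytes - 4, "TAIL", 4);
    memcpy (address, "HEAD", 4);
    printf ("%.8s\n", address + n_bytes - 4);   /* prints TAILHEAD */

    return 0;
}
```

The replacement code keeps a single MAP_SHARED mapping of the fd and pays for it with the explicit two-piece copy shown in the diff above.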