summary refs log tree commit diff
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2007-12-16 14:56:22 +0000
committerChris Wilson <chris@chris-wilson.co.uk>2007-12-16 14:56:22 +0000
commitf8e3e1413b26bf95fdc6223f6ae67c3caddfb000 (patch)
tree19ab3a1e6d11539c8f2944913acfeaa05d5aa3c0
parenta0f23201e3a411ab165d87b060ffdadfa708679f (diff)
Allow user to limit number of callers to uniquely identify allocators.
-rw-r--r--src/app.c10
-rw-r--r--src/callgraph-store.c11
-rw-r--r--src/lwp-events.h14
-rw-r--r--src/lwp.c99
4 files changed, 119 insertions, 15 deletions
diff --git a/src/app.c b/src/app.c
index 349ee0e..6be4107 100644
--- a/src/app.c
+++ b/src/app.c
@@ -1497,9 +1497,9 @@ lwp_discard (int fd)
switch (ev.type) {
case LWP_INIT:
- break;
-
case LWP_FINI:
+ case LWP_DLOPEN:
+ case LWP_DLCLOSE:
break;
case LWP_MALLOC:
@@ -1669,7 +1669,7 @@ lwp_read (GIOChannel *io, App *app)
}
ev.type = c;
- if (! (ev.type == LWP_INIT || ev.type == LWP_FINI)) {
+ if (ev.allocator) {
A = _client_get_allocator (&app->client,
ev.allocator);
g_return_val_if_fail (A != NULL, FALSE);
@@ -1684,6 +1684,10 @@ lwp_read (GIOChannel *io, App *app)
app->client.terminated = TRUE;
break;
+ case LWP_DLOPEN:
+ case LWP_DLCLOSE:
+ break;
+
case LWP_MALLOC:
readn (fd, &ev.event.malloc.size, sizeof (ev.event.malloc.size));
readn (fd, &ev.event.malloc.addr, sizeof (ev.event.malloc.addr));
diff --git a/src/callgraph-store.c b/src/callgraph-store.c
index ac07e98..eb80638 100644
--- a/src/callgraph-store.c
+++ b/src/callgraph-store.c
@@ -946,6 +946,8 @@ call_graph_store_update (CallGraphStore *store,
AA = child->allocator;
if (AA != NULL) {
+ guint min_frames;
+
if (AA == A)
break;
@@ -955,7 +957,8 @@ call_graph_store_update (CallGraphStore *store,
/* insert children for this pair of allocators */
n++;
- while (A->frames[n] == AA->frames[n]) {
+ min_frames = MIN (A->n_frames, AA->n_frames);
+ while (n < min_frames && A->frames[n] == AA->frames[n]) {
child = _call_graph_frame_new (store, AA, n, child);
_call_graph_frame_accumulate (child, At, Ap);
child->allocator = NULL;
@@ -963,8 +966,10 @@ call_graph_store_update (CallGraphStore *store,
g_assert (n < A->n_frames && n < AA->n_frames);
}
- _call_graph_frame_new (store, AA, n, child);
- _call_graph_frame_new (store, A, n, child);
+ if (n < AA->n_frames)
+ _call_graph_frame_new (store, AA, n, child);
+ if (n < A->n_frames)
+ _call_graph_frame_new (store, A, n, child);
break;
} else
frame = child;
diff --git a/src/lwp-events.h b/src/lwp-events.h
index a4d2070..ab11141 100644
--- a/src/lwp-events.h
+++ b/src/lwp-events.h
@@ -35,7 +35,10 @@ typedef enum {
LWP_MALLOC,
LWP_MEMALIGN,
LWP_REALLOC,
- LWP_FREE
+ LWP_FREE,
+
+ LWP_DLOPEN,
+ LWP_DLCLOSE
} LWP_EventType;
typedef struct _lwp_event_init {
@@ -67,6 +70,12 @@ typedef struct _lwp_event_free {
gpointer addr;
} LWP_EventFree;
+typedef struct _lwp_event_dlopen {
+} LWP_EventDlopen;
+
+typedef struct _lwp_event_dlclose {
+} LWP_EventDlclose;
+
typedef union _lwp_event {
LWP_EventInit init;
LWP_EventFini fini;
@@ -74,6 +83,9 @@ typedef union _lwp_event {
LWP_EventMemalign memalign;
LWP_EventRealloc realloc;
LWP_EventFree free;
+
+ LWP_EventDlopen dlopen;
+ LWP_EventDlclose dlclose;
} LWP_Event;
typedef struct _lwp_event_record {
diff --git a/src/lwp.c b/src/lwp.c
index 512e37d..054df59 100644
--- a/src/lwp.c
+++ b/src/lwp.c
@@ -97,6 +97,7 @@ struct _lwp_allocator {
static pthread_mutex_t event_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static gboolean _lwp_disable_stacktraces;
+static guint _lwp_num_callers = (guint) -1;
static GSList *pending_allocators;
static LWP_Allocator *allocator_ht[31627];
@@ -126,6 +127,9 @@ DLSYM_DECLARE (memalign);
DLSYM_DECLARE (posix_memalign);
DLSYM_DECLARE (free);
+DLSYM_DECLARE (dlopen);
+DLSYM_DECLARE (dlclose);
+
static gpointer
_lwp_perm_alloc (gsize size)
{
@@ -379,7 +383,6 @@ _lwp_write_events (const LWP_EventRecord *events, gushort n_events)
if (! _lwp_writen (fd, &time, sizeof (time)))
goto CLEAN_FD;
- /* XXX dlclose */
if (n_pending_symbols) {
_lwp_lookup_so (pending_symbols->data);
@@ -468,9 +471,9 @@ _lwp_write_events (const LWP_EventRecord *events, gushort n_events)
switch (events[count].type) {
case LWP_INIT:
- break;
-
case LWP_FINI:
+ case LWP_DLOPEN:
+ case LWP_DLCLOSE:
break;
case LWP_MALLOC:
@@ -664,21 +667,58 @@ _lwp_record_event (LWP_EventType type, const LWP_Event *ev)
static guint depth;
static gboolean _write_failed;
+ gboolean lookup_allocator = TRUE;
+ gboolean force_send = FALSE;
+
pthread_mutex_lock (&event_mutex);
if (depth++ || _write_failed)
goto unlock;
- if (type == LWP_INIT || type == LWP_FINI) {
+ switch (type) {
+ case LWP_INIT:
+ force_send = TRUE;
+ lookup_allocator = FALSE;
+ break;
+
+ case LWP_FINI:
+ force_send = TRUE;
+ lookup_allocator = FALSE;
+ break;
+
+ case LWP_DLOPEN:
+ lookup_allocator = FALSE;
+ break;
+
+ case LWP_DLCLOSE:
+ force_send = TRUE;
+ lookup_allocator = FALSE;
+ break;
+
+ case LWP_MALLOC:
+ case LWP_REALLOC:
+ case LWP_MEMALIGN:
+ case LWP_FREE:
+ default:
+ break;
+ }
+
+ if (! lookup_allocator) {
events[n_events].allocator = 0;
} else if (! _lwp_disable_stacktraces) {
gpointer stack_ips[1024 / sizeof (gpointer)], *ips;
guint n_ips, max_ips;
max_ips = G_N_ELEMENTS (stack_ips);
+ if (max_ips > _lwp_num_callers)
+ max_ips = _lwp_num_callers;
+
ips = stack_ips;
n_ips = backtrace (ips, max_ips);
- while (n_ips == max_ips) {
+ while (n_ips == max_ips && max_ips < _lwp_num_callers) {
max_ips *= 2;
+ if (max_ips > _lwp_num_callers)
+ max_ips = _lwp_num_callers;
+
ips = g_newa (gpointer, max_ips);
if (ips == NULL)
goto unlock;
@@ -700,6 +740,8 @@ _lwp_record_event (LWP_EventType type, const LWP_Event *ev)
default:
case LWP_INIT:
case LWP_FINI:
+ case LWP_DLOPEN:
+ case LWP_DLCLOSE:
case LWP_MALLOC: caller = malloc; break;
case LWP_MEMALIGN: caller = memalign; break;
case LWP_REALLOC: caller = realloc; break;
@@ -711,9 +753,7 @@ _lwp_record_event (LWP_EventType type, const LWP_Event *ev)
events[n_events].type = type;
events[n_events].time = _lwp_read_time ();
events[n_events].event = *ev;
- if (G_UNLIKELY (++n_events == G_N_ELEMENTS (events) ||
- type == LWP_INIT || type == LWP_FINI))
- {
+ if (G_UNLIKELY (++n_events == G_N_ELEMENTS (events) || force_send)) {
_write_failed = ! _lwp_write_events (events, n_events);
n_events = 0;
}
@@ -885,6 +925,39 @@ free (void *ptr)
_lwp_record_event (LWP_FREE, &event);
}
+void *
+dlopen (const char *filename, int flag)
+{
+ LWP_Event event;
+ void *ret;
+
+ if (! _lwp_dlcall_initialized)
+ return NULL;
+
+ ret = DLCALL (dlopen, filename, flag);
+
+ _lwp_record_event (LWP_DLOPEN, &event);
+
+ return ret;
+}
+
+int
+dlclose (void *handle)
+{
+ LWP_Event event;
+ int ret;
+
+ if (! _lwp_dlcall_initialized)
+ return -1;
+
+ _lwp_record_event (LWP_DLCLOSE, &event);
+
+ ret = DLCALL (dlclose, handle);
+
+ return ret;
+}
+
+
/* XXX __builtin_new */
static void __attribute__ ((constructor))
__lwp_init (void)
@@ -898,6 +971,13 @@ __lwp_init (void)
env = getenv ("LWP_DISABLE_STACKTRACES");
_lwp_disable_stacktraces = env != NULL;
+ env = getenv ("LWP_NUM_CALLERS");
+ if (env != NULL) {
+ _lwp_num_callers = strtoul (env, NULL, 0);
+ if (_lwp_num_callers == 0)
+ _lwp_disable_stacktraces = TRUE;
+ }
+
DLSYM_DEFINE (malloc);
DLSYM_DEFINE (calloc);
DLSYM_DEFINE (realloc);
@@ -907,6 +987,9 @@ __lwp_init (void)
DLSYM_DEFINE (posix_memalign);
DLSYM_DEFINE (free);
+ DLSYM_DEFINE (dlopen);
+ DLSYM_DEFINE (dlclose);
+
_lwp_perm_alloc (0); /* grab our buffer first */
_lwp_dlcall_initialized = TRUE;