summaryrefslogtreecommitdiff
path: root/coregrind/m_aspacemgr
diff options
context:
space:
mode:
Diffstat (limited to 'coregrind/m_aspacemgr')
-rw-r--r--coregrind/m_aspacemgr/.svn/dir-prop-base9
-rw-r--r--coregrind/m_aspacemgr/.svn/entries77
-rw-r--r--coregrind/m_aspacemgr/.svn/format1
-rw-r--r--coregrind/m_aspacemgr/.svn/prop-base/aspacemgr-linux.c.svn-base9
-rw-r--r--coregrind/m_aspacemgr/.svn/text-base/aspacemgr-aix5.c.svn-base2641
-rw-r--r--coregrind/m_aspacemgr/.svn/text-base/aspacemgr-common.c.svn-base386
-rw-r--r--coregrind/m_aspacemgr/.svn/text-base/aspacemgr-linux.c.svn-base3230
-rw-r--r--coregrind/m_aspacemgr/.svn/text-base/priv_aspacemgr.h.svn-base130
-rw-r--r--coregrind/m_aspacemgr/aspacemgr-aix5.c2641
-rw-r--r--coregrind/m_aspacemgr/aspacemgr-common.c386
-rw-r--r--coregrind/m_aspacemgr/aspacemgr-linux.c3230
-rw-r--r--coregrind/m_aspacemgr/priv_aspacemgr.h130
12 files changed, 12870 insertions, 0 deletions
diff --git a/coregrind/m_aspacemgr/.svn/dir-prop-base b/coregrind/m_aspacemgr/.svn/dir-prop-base
new file mode 100644
index 0000000..67452d4
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/dir-prop-base
@@ -0,0 +1,9 @@
+K 10
+svn:ignore
+V 31
+*.a
+.deps
+Makefile
+Makefile.in
+
+END
diff --git a/coregrind/m_aspacemgr/.svn/entries b/coregrind/m_aspacemgr/.svn/entries
new file mode 100644
index 0000000..64258a8
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/entries
@@ -0,0 +1,77 @@
+8
+
+dir
+9703
+svn://svn.valgrind.org/valgrind/trunk/coregrind/m_aspacemgr
+svn://svn.valgrind.org/valgrind
+
+
+
+2009-03-15T23:25:38.213170Z
+9416
+njn
+has-props
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a5019735-40e9-0310-863c-91ae7b9d1cf9
+
+aspacemgr-linux.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+0712baa49402c7a7c13ac76c79cb8a8d
+2009-03-15T23:25:38.213170Z
+9416
+njn
+has-props
+
+aspacemgr-aix5.c
+file
+
+
+
+
+2009-03-13T17:30:27.000000Z
+452e0f15158dc421c17df93f1150bd5b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+priv_aspacemgr.h
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+82f453de99ebe7bffa5b76dece5c48cd
+2009-03-15T23:25:38.213170Z
+9416
+njn
+
+aspacemgr-common.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+2ede6bf438a5a0c28953e4e58132f41b
+2009-03-15T23:25:38.213170Z
+9416
+njn
+
diff --git a/coregrind/m_aspacemgr/.svn/format b/coregrind/m_aspacemgr/.svn/format
new file mode 100644
index 0000000..45a4fb7
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/format
@@ -0,0 +1 @@
+8
diff --git a/coregrind/m_aspacemgr/.svn/prop-base/aspacemgr-linux.c.svn-base b/coregrind/m_aspacemgr/.svn/prop-base/aspacemgr-linux.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/prop-base/aspacemgr-linux.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-aix5.c.svn-base b/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-aix5.c.svn-base
new file mode 100644
index 0000000..ce529e5
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-aix5.c.svn-base
@@ -0,0 +1,2641 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager: segment initialisation and ---*/
+/*--- tracking, stack operations ---*/
+/*--- ---*/
+/*--- Implementation for AIX5 m_aspacemgr-aix5.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+
+ Neither the names of the U.S. Department of Energy nor the
+ University of California nor the names of its contributors may be
+ used to endorse or promote products derived from this software
+ without prior written permission.
+*/
+
+/* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+ ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
+ AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
+ ************************************************************* */
+
+#include "priv_aspacemgr.h"
+
+
+/* Note: many of the exported functions implemented below are
+ described more fully in comments in pub_core_aspacemgr.h.
+*/
+
+/* This provides a minimal address space management facility for AIX5.
+ It is not as comprehensive, robust or efficient as its Linux
+ counterpart.
+
+ It does implement the advise/notify concept described in
+ aspacemgr-linux.c, but minimally. It only keeps track of the
+ mappings belonging to Valgrind; the client can do what it likes so
+ long as it doesn't trash Valgrind's mappings.
+
+ This is unfortunate, but the root problem is that it is impossible
+ to find out on AIX what the complete set of mappings for a process
+ is. Sure, AIX does have /proc/pid/map, but it's weak compared to
+ Linux's: it just shows some small subset of the mappings, not all
+ of them. So it is not very useful: it can't be used to discover
+ the true initial process mapping state, and it can't be used to
+ cross-check Valgrind's internal mapping table, as is done at
+ --sanity-level=3 and above on Linux.
+*/
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The Address Space Manager's state. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Describes AIX5-specific segment kinds */
+typedef
+ enum {
+ ASkFree=1, // free space
+ ASkMText, // module text (code) mapping
+ ASkMData, // module data (& bss) mapping
+ ASkFileV, // file mapping belonging to valgrind
+ ASkAnonC, // anonymous mapping belonging to the client
+ ASkAnonV, // anonymous mapping belonging to valgrind
+ ASkShmemC, // shm mapping belonging to the client
+ ASkPreAlloc // area preallocated from sbrk
+ }
+ AixSegKind;
+
+/* Segment table entries, in summary:
+
+ ASkFree start end
+ ASkMText start end r w x sibling ismainexe fname mname
+ ASkMData start end r w x sibling
+ FileV start end r w x fname offset
+ AnonC start end r w x fromP isCH
+ AnonV start end r w x fromP
+ ShmemC start end r w x
+ PreAlloc start end
+
+ Entries are non-overlapping and cover the entire address space
+ exactly (as in the Linux aspacem). Unlike Linux there are no
+ alignment constraints, since we're just recording what's going on,
+ rather than controlling it.
+
+ MText/MData are XCOFF mapped modules, as determined by looking at
+ /proc/../map. MText is the primary entry and contains the text
+ range. MData contains the data range, if the module has a data
+ mapping (usually but not always). MText also holds the avma of the
+ corresponding data segment start, if any, (sibling field) so it can
+ be found and the two added/removed together. Similarly MData
+ contains the address of the corresponding MText (also sibling).
+
+ fname/mname only apply to MText. To find the fname/mname for MData
+ you have to look at the corresponding MText entry, which is
+ guaranteed to exist. MText may exist without a corresponding MData
+ but not vice versa. Kludge: in fact fname/mname have to be
+ allowed in MData, else read_procselfmap doesn't work.
+
+ MText may have a zero sibling pointer, indicating that there is no
+ corresponding MData. But MData must have a nonzero sibling pointer
+ since MData without MText is not allowed. Implication is that
+ neither MText nor MData may be mapped at zero as this would mess up
+ the representation, but I don't think that will ever happen since
+ AIX uses page zero as a readonly const-zero area.
+
+ For MData entries, the data section size acquired from /proc/../map
+ appears to also include the bss, so there is no need for any
+ further handling of that.
+
+ isCH indicates whether an AnonC area is part of the client heap
+ or not. May not be set for any other kind of area.
+
+ File and member names are entries into the string table.
+
+ fromP, for AnonC/AnonV, if True, indicates that the segment was
+ allocated from a PreAlloc area, and so should be returned to that
+ state upon deallocation. If False, indicates that the segment
+ should be unmapped on deallocation.
+*/
+typedef
+ struct {
+ AixSegKind kind;
+
+ /* ALL: extent */
+ /* Note: zero-length segments are not allowed. That guarantees
+ that start <= end. */
+ Addr start; // lowest addr in range (ALL)
+ Addr end; // highest addr in range (ALL)
+
+ /* ALL except Free */
+ Bool hasR;
+ Bool hasW;
+ Bool hasX;
+
+ /* misc */
+ Addr sibling; // MText, MData only: addr of MData/MText
+ Bool isMainExe; // MText only: is this the main executable?
+ Bool isCH; // AnonC only: is this part of the client's heap?
+ Bool fromP; // AnonC, AnonV only: originated from PreAlloc?
+ UChar* fname; // MText, FileV only: filename
+ UChar* mname; // MText only: member name if present
+ Off64T offset; // FileV only: file offset
+ }
+ AixSegment;
+
+
+#define VG_N_ASEGMENTS 5000
+
+typedef
+ struct {
+ AixSegment seg[VG_N_ASEGMENTS];
+ Int used;
+ }
+ AixSegments;
+
+
+/* ------ start of STATE for the address-space manager ------ */
+
+/* A table of zero-terminated strings (file names etc). This
+ is only ever added to. */
+
+#define VG_N_ASTRTAB 200000
+static Int strtab_used = 0;
+static UChar strtab[VG_N_ASTRTAB];
+
+#define Addr_MIN ((Addr)0)
+#define Addr_MAX ((Addr)(-1ULL))
+
+/* The main array of AixSegments, in order as required. */
+
+static AixSegments asegs_pri;
+
+/* and two auxiliary arrays. */
+
+static AixSegments asegs_tnew;
+static AixSegments asegs_told;
+
+/* The assumed size of the main thread's stack, so that we can add a
+ segment for it at startup. */
+
+#define N_FAKE_STACK_PAGES_MIN 4096 /* 16M fake stack */ /* default size */
+#define N_FAKE_STACK_PAGES_MAX 32768 /* 128M fake stack */ /* max size? */
+
+
+/* Hacks which are probably for AIX 'millicode'. Note: ensure
+ these stay page aligned. */
+
+#define MAGIC_PAGES_1_BASE 0x3000
+#define MAGIC_PAGES_1_SIZE (2*0x1000)
+
+#define MAGIC_PAGES_2_BASE 0xC000
+#define MAGIC_PAGES_2_SIZE (4*0x1000)
+
+
+#define AM_SANITY_CHECK(_who) \
+ do { \
+ if (VG_(clo_sanity_level >= 3)) { \
+ Bool ok = sane_AixSegments(&asegs_pri); \
+ if (!ok) \
+ VG_(debugLog)(0,"aspace", "sanity check failed, " \
+ "who = %s\n", _who); \
+ aspacem_assert(ok); \
+ } \
+ } while (0)
+
+/* When preallocating a block from sbrk-world, how much extra
+ should we pre-emptively acquire? */
+
+//#define AM_PREALLOC_EXTRA (512 * 1024)
+//#define AM_PREALLOC_EXTRA 0x0800000 /* 8 M */
+#define AM_PREALLOC_EXTRA 0x4000000 /* 64 M */
+
+/* The AIX5 aspacem implementation needs to be told when it is and
+ isn't allowed to use sbrk to allocate memory. Hence: */
+Bool VG_(am_aix5_sbrk_allowed) = True;
+
+/* ------ end of STATE for the address-space manager ------ */
+
+/* ------ Forwards decls ------ */
+static void parse_procselfmap ( /*OUT*/ AixSegments* );
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Stuff for 4K (small-page-size) rounding. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+#define AM_4K_PAGESZ 4096
+
+static Bool AM_IS_4K_ALIGNED ( UWord w )
+{
+ UWord m = AM_4K_PAGESZ-1;
+ return toBool( (w & m) == 0 );
+}
+
+static UWord AM_4K_ROUNDUP ( UWord w )
+{
+ UWord m = AM_4K_PAGESZ-1;
+ return (w+m) & (~m);
+}
+
+static UWord AM_64K_ROUNDUP ( UWord w )
+{
+ UWord m = 0x10000-1;
+ return (w+m) & (~m);
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- String table management. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Add the given string into the string table (or find an existing
+ copy of it) and return a pointer to the in-table version. The
+ pointer will be valid for the entire rest of the run. */
+
+static UChar* add_to_strtab ( UChar* str )
+{
+ Int off, len;
+ /* First, look for the string. */
+ off = 0;
+ while (off < strtab_used) {
+ if (0 == VG_(strcmp)(str, &strtab[off]))
+ return &strtab[off];
+ off += VG_(strlen)(&strtab[off]) + 1;
+ }
+ /* not present? we'll have to copy it then. */
+ len = VG_(strlen)(str);
+ if (len + 1 + strtab_used > VG_N_ASTRTAB)
+ ML_(am_barf_toolow)("VG_N_ASTRTAB");
+ off = strtab_used;
+ for (; *str; str++)
+ strtab[strtab_used++] = *str;
+ strtab[strtab_used++] = 0;
+ aspacem_assert(strtab_used <= VG_N_ASTRTAB);
+ return &strtab[off];
+}
+
+
+static Bool is_in_strtab ( UChar* str )
+{
+ if (str < &strtab[0])
+ return False;
+ if (str >= &strtab[strtab_used])
+ return False;
+ if (str > &strtab[0] && str[-1] != 0)
+ return False;
+ return True;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Low level AixSegment stuff. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+static void init_AixSegment ( AixSegment* s )
+{
+ s->kind = 0; /* invalid */
+ s->start = 0;
+ s->end = 0;
+ s->hasR = False;
+ s->hasW = False;
+ s->hasX = False;
+ s->sibling = 0;
+ s->isMainExe = False;
+ s->isCH = False;
+ s->fromP = False;
+ s->fname = NULL;
+ s->mname = NULL;
+ s->offset = 0;
+}
+
+
+static HChar* name_of_AixSegKind ( AixSegKind sk )
+{
+ switch (sk) {
+ case ASkFree: return "Free ";
+ case ASkMText: return "MText";
+ case ASkMData: return "MData";
+ case ASkAnonV: return "AnonV";
+ case ASkAnonC: return "AnonC";
+ case ASkFileV: return "FileV";
+ case ASkShmemC: return "ShmC ";
+ case ASkPreAlloc: return "PreAl";
+ default: ML_(am_barf)("name_of_AixSegKind");
+ /*NOTREACHED*/
+ return NULL;
+ }
+}
+
+
+static
+void show_AixSegment ( Int logLevel, Int segNo, AixSegment* seg )
+{
+ HChar* segName = name_of_AixSegKind( seg->kind );
+ switch (seg->kind) {
+ case ASkFree:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx\n",
+ segNo, /*segName*/" ",
+ (ULong)seg->start, (ULong)seg->end
+ );
+ break;
+ case ASkMText:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- (d %010llx) %s%s%s%s\n",
+ segNo, seg->isMainExe ? "MTEXT" : "MText",
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ (ULong)seg->sibling,
+ seg->fname,
+ seg->mname ? "(" : "",
+ seg->mname ? (HChar*)seg->mname : "",
+ seg->mname ? ")" : ""
+ );
+ break;
+ case ASkMData:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- (t %010llx)\n",
+ segNo, "MData",
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ (ULong)seg->sibling
+ );
+ break;
+ case ASkFileV:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- %6lld %s\n",
+ segNo, segName,
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ seg->offset,
+ seg->fname
+ );
+ break;
+ case ASkAnonV:
+ case ASkAnonC:
+ case ASkShmemC:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c%c%c\n",
+ segNo, segName,
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ seg->kind==ASkAnonC && seg->isCH ? 'H' : '-',
+ seg->fromP ? 'P' : '-'
+ );
+ break;
+ case ASkPreAlloc:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- (size %llu)\n",
+ segNo, segName,
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ (ULong)seg->end - (ULong)seg->start + 1
+ );
+ break;
+ default:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: show_AixSegment: unknown segment\n",
+ segNo);
+ break;
+ }
+}
+
+
+static void init_AixSegments ( AixSegments* segs )
+{
+ segs->used = 1;
+ init_AixSegment( &segs->seg[0] );
+ segs->seg[0].kind = ASkFree;
+ segs->seg[0].start = Addr_MIN;
+ segs->seg[0].end = Addr_MAX;
+}
+
+
+static
+void show_AixSegments ( Int logLevel, HChar* who, AixSegments* segs )
+{
+ Int i;
+ VG_(debugLog)(logLevel, "aspacem", "<<< %s\n", who);
+ for (i = 0; i < segs->used; i++)
+ show_AixSegment( logLevel, i, &segs->seg[i] );
+ VG_(debugLog)(logLevel, "aspacem", ">>>\n");
+}
+
+
+static Bool sane_AixSegment ( AixSegment* seg )
+{
+ /* disallow zero and negative length segments */
+ if (seg->end < seg->start)
+ return False;
+
+ switch (seg->kind) {
+ case ASkFree:
+ if (seg->hasR || seg->hasW || seg->hasX)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0 || seg->offset != 0)
+ return False;
+ if (seg->fname || seg->mname)
+ return False;
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkMText:
+ if (!is_in_strtab(seg->fname))
+ return False;
+ if (seg->mname && !is_in_strtab(seg->mname))
+ return False;
+ if (seg->offset != 0)
+ return False;
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkMData:
+ if (seg->isMainExe || seg->sibling == 0 || seg->offset != 0)
+ return False;
+ /* fname/mname have to be allowed in MData, else
+ read_procselfmap doesn't work. Unfortunately. */
+ /*
+ if (seg->fname || seg->mname)
+ return False;
+ */
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkFileV:
+ if (!is_in_strtab(seg->fname))
+ return False;
+ if (seg->mname != NULL)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0)
+ return False;
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkShmemC:
+ case ASkAnonV:
+ case ASkAnonC:
+ if (seg->fname || seg->mname)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0)
+ return False;
+ if (seg->offset != 0)
+ return False;
+ if (seg->kind != ASkAnonC && seg->isCH)
+ return False;
+ if ( (!(seg->kind == ASkAnonV || seg->kind == ASkAnonC))
+ && seg->fromP)
+ return False;
+ break;
+ case ASkPreAlloc:
+ if (seg->fname || seg->mname)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0)
+ return False;
+ if (seg->offset != 0)
+ return False;
+ if (seg->kind != ASkAnonC && seg->isCH)
+ return False;
+ if (seg->fromP)
+ return False;
+ if (!AM_IS_4K_ALIGNED(seg->start))
+ return False;
+ if (!AM_IS_4K_ALIGNED(seg->end + 1))
+ return False;
+ if (!(seg->hasR && seg->hasW && seg->hasX))
+ return False;
+ break;
+ default:
+ return False;
+ }
+ return True;
+}
+
+
+/* Binary search the interval array for a given address. Since the
+ array covers the entire address space the search cannot fail. */
+static Int find_asegment_idx ( AixSegments* segs, Addr a )
+{
+ Addr a_mid_lo, a_mid_hi;
+ Int mid,
+ lo = 0,
+ hi = segs->used-1;
+ aspacem_assert(lo <= hi);
+ while (True) {
+ /* current unsearched space is from lo to hi, inclusive. */
+ if (lo > hi) {
+ /* Not found. This can't happen. */
+ ML_(am_barf)("find_nsegment_idx: not found");
+ }
+ mid = (lo + hi) / 2;
+ a_mid_lo = segs->seg[mid].start;
+ a_mid_hi = segs->seg[mid].end;
+
+ if (a < a_mid_lo) { hi = mid-1; continue; }
+ if (a > a_mid_hi) { lo = mid+1; continue; }
+ aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
+ aspacem_assert(0 <= mid && mid < segs->used);
+ return mid;
+ }
+}
+
+
+static Bool sane_AixSegments ( AixSegments* segs )
+{
+ Int i;
+
+ /* Check endpoints */
+ if (segs->used < 1 || segs->used > VG_N_ASEGMENTS) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad ->used");
+ return False;
+ }
+ if (segs->seg[0].start != Addr_MIN
+ || segs->seg[segs->used-1].end != Addr_MAX) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad endpoints");
+ return False;
+ }
+
+ /* Check each segment, and check entire range is covered. */
+ for (i = 0; i < segs->used; i++) {
+ if (!sane_AixSegment( &segs->seg[i] )) {
+ VG_(debugLog)(0, "aspacem",
+ "sane_AixSegments: bad segment %d\n", i);
+ return False;
+ }
+ }
+ for (i = 1; i < segs->used; i++) {
+ if (segs->seg[i-1].end + 1 != segs->seg[i].start) {
+ VG_(debugLog)(0, "aspacem",
+ "sane_AixSegments: bad transition at %d/%d\n", i-1,i);
+ return False;
+ }
+ }
+
+ /* Now we know 'seg' is safe for use in find_asegment_idx().
+ Check the sibling pointers for MText/MData.
+
+ Also check that the segment starting at address zero is neither
+ MText nor MData (since this would mess up the sibling pointer
+ representation; see comments above.) Failure of this is not per
+ se a logic failure, but it does indicate that the kernel
+ unexpectedly placed MText or MData at zero, and our
+ representation is therefore inadequate.
+ */
+ if (segs->seg[0].kind == ASkMText || segs->seg[0].kind == ASkMData) {
+ VG_(debugLog)(0, "aspacem",
+ "sane_AixSegments: ASkMText/ASkMData at address zero\n");
+ return False;
+ }
+
+ for (i = 0; i < segs->used-1; i++) {
+
+ AixSegment *s1, *s2;
+
+ s1 = &segs->seg[i];
+
+ if (s1->kind == ASkMData) {
+ s2 = &segs->seg[ find_asegment_idx(segs, s1->sibling) ];
+ if (s2->kind != ASkMText
+ || find_asegment_idx(segs, s2->sibling) != i) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad sibling "
+ "link(s) for ASkData\n");
+ return False;
+ }
+ }
+
+ if (s1->kind == ASkMText && s1->sibling != 0) {
+ s2 = &segs->seg[ find_asegment_idx(segs, s1->sibling) ];
+ if (s2->kind != ASkMData
+ || find_asegment_idx(segs, s2->sibling) != i) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad sibling "
+ "link(s) for ASkText\n");
+ return False;
+ }
+ }
+
+ }
+
+ return True;
+}
+
+
+/* Try merging s2 into s1, if possible. If successful, s1 is
+ modified, and True is returned. Otherwise s1 is unchanged and
+ False is returned. */
+
+static Bool maybe_merge_asegments ( AixSegment* s1, AixSegment* s2 )
+{
+ if (s1->kind != s2->kind)
+ return False;
+
+ if (s1->end+1 != s2->start)
+ return False;
+
+ switch (s1->kind) {
+
+ case ASkFree:
+ s1->end = s2->end;
+ return True;
+
+ case ASkAnonC:
+ case ASkAnonV:
+ if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
+ && s1->hasX == s2->hasX && s1->isCH == s2->isCH
+ && s1->fromP == s2->fromP) {
+ s1->end = s2->end;
+ return True;
+ }
+ break;
+
+ /* not really necessary, but .. */
+ case SkFileV:
+ if (s1->hasR == s2->hasR
+ && s1->hasW == s2->hasW && s1->hasX == s2->hasX
+ && s1->fname == s2->fname
+ && s2->offset == s1->offset
+ + ((ULong)s2->start) - ((ULong)s1->start) ) {
+ s1->end = s2->end;
+ return True;
+ }
+ break;
+
+ /* it's important to merge PreAlloc's back together to avoid
+ fragmenting PreAlloc'd space unnecessarily */
+ case ASkPreAlloc:
+ s1->end = s2->end;
+ return True;
+
+ default:
+ break;
+ }
+
+ return False;
+}
+
+
+/* Merge mergable segments in SEGS. */
+
+static void preen_asegments ( AixSegments* segs )
+{
+ Int r, w;
+
+ aspacem_assert(segs->used >= 1);
+ if (segs->used == 1)
+ return;
+
+ w = 0;
+ for (r = 1; r < segs->used; r++) {
+ if (maybe_merge_asegments(&segs->seg[w], &segs->seg[r])) {
+ /* nothing */
+ } else {
+ w++;
+ if (w != r)
+ segs->seg[w] = segs->seg[r];
+ }
+ }
+ w++;
+ aspacem_assert(w > 0 && w <= segs->used);
+ segs->used = w;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Modifying a segment array, and constructing segments. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Split the segment containing 'a' into two, so that 'a' is
+ guaranteed to be the start of a new segment. If 'a' is already the
+ start of a segment, do nothing. */
+
+static void split_asegment_at ( AixSegments* segs, Addr a )
+{
+ Int i, j;
+
+ aspacem_assert(a > 0);
+ aspacem_assert(segs->used >= 1);
+
+ i = find_asegment_idx(segs, a);
+ aspacem_assert(i >= 0 && i < segs->used);
+
+ if (segs->seg[i].start == a)
+ /* 'a' is already the start point of a segment, so nothing to be
+ done. */
+ return;
+
+ /* else we have to slide the segments upwards to make a hole */
+ if (segs->used >= VG_N_ASEGMENTS)
+ ML_(am_barf_toolow)("VG_N_ASEGMENTS");
+ for (j = segs->used-1; j > i; j--)
+ segs->seg[j+1] = segs->seg[j];
+ segs->used++;
+
+ segs->seg[i+1] = segs->seg[i];
+ segs->seg[i+1].start = a;
+ segs->seg[i].end = a-1;
+
+ if (segs->seg[i].kind == ASkFileV /* || segs->seg[i].kind == ASkFileC*/)
+ segs->seg[i+1].offset
+ += ((ULong)segs->seg[i+1].start) - ((ULong)segs->seg[i].start);
+
+ aspacem_assert(sane_AixSegment(&segs->seg[i]));
+ aspacem_assert(sane_AixSegment(&segs->seg[i+1]));
+}
+
+
+/* Do the minimum amount of segment splitting necessary to ensure that
+ sLo is the first address denoted by some segment and sHi is the
+ highest address denoted by some other segment. Returns the indices
+ of the lowest and highest segments in the range. */
+
+static
+void split_asegments_lo_and_hi ( AixSegments* segs,
+ Addr sLo, Addr sHi,
+ /*OUT*/Int* iLo,
+ /*OUT*/Int* iHi )
+{
+ aspacem_assert(sLo < sHi);
+
+ if (sLo > 0)
+ split_asegment_at(segs, sLo);
+ if (sHi < Addr_MAX)
+ split_asegment_at(segs, sHi+1);
+
+ *iLo = find_asegment_idx(segs,sLo);
+ *iHi = find_asegment_idx(segs,sHi);
+ aspacem_assert(0 <= *iLo && *iLo < segs->used);
+ aspacem_assert(0 <= *iHi && *iHi < segs->used);
+ aspacem_assert(*iLo <= *iHi);
+ aspacem_assert(segs->seg[*iLo].start == sLo);
+ aspacem_assert(segs->seg[*iHi].end == sHi);
+ /* Not that I'm overly paranoid or anything, definitely not :-) */
+}
+
+
+/* Add SEG to the collection, deleting/truncating any it overlaps.
+ This deals with all the tricky cases of splitting up segments as
+ needed. Contents of SEG are copied. */
+
+static void add_asegment ( AixSegments* segs, AixSegment* seg )
+{
+ Int i, iLo, iHi, delta;
+ Bool segment_is_sane;
+
+ Addr sStart = seg->start;
+ Addr sEnd = seg->end;
+
+ aspacem_assert(sStart <= sEnd);
+
+ segment_is_sane = sane_AixSegment(seg);
+ if (!segment_is_sane) show_AixSegment(0,0,seg);
+ aspacem_assert(segment_is_sane);
+
+ split_asegments_lo_and_hi( segs, sStart, sEnd, &iLo, &iHi );
+
+ /* Now iLo .. iHi inclusive is the range of segment indices which
+ seg will replace. If we're replacing more than one segment,
+ slide those above the range down to fill the hole. */
+ delta = iHi - iLo;
+ aspacem_assert(delta >= 0);
+ if (delta > 0) {
+ for (i = iLo; i < segs->used-delta; i++)
+ segs->seg[i] = segs->seg[i+delta];
+ segs->used -= delta;
+ }
+ aspacem_assert(segs->used >= 1);
+
+ segs->seg[iLo] = *seg;
+
+ preen_asegments(segs);
+ if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
+}
+
+
+/* Convert everything in SEG except MData and MText into Free,
+ then preen, so as to retain normalised form. */
+
+static void knockout_non_module_segs ( AixSegments* segs )
+{
+ Int i;
+ Addr s, e;
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind == ASkFree
+ || segs->seg[i].kind == ASkMText
+ || segs->seg[i].kind == ASkMData)
+ continue;
+ s = segs->seg[i].start;
+ e = segs->seg[i].end;
+ init_AixSegment( &segs->seg[i] );
+ segs->seg[i].start = s;
+ segs->seg[i].end = e;
+ segs->seg[i].kind = ASkFree;
+ }
+ preen_asegments(segs);
+ aspacem_assert( sane_AixSegments(segs) );
+}
+
+
+/* Copy a segment array. */
+
+static void copy_asegments_d_s ( AixSegments* dst, AixSegments* src )
+{
+ Int i;
+ aspacem_assert(src->used >= 1 && src->used < VG_N_ASEGMENTS);
+ dst->used = src->used;
+ for (i = 0; i < src->used; i++)
+ dst->seg[i] = src->seg[i];
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Re-reading /proc/../map and updating MText/MData segments ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Find out the size of the AixCodeSegChange that must be
+ presented to VG_(am_aix5_reread_procmap). */
+
+Int VG_(am_aix5_reread_procmap_howmany_directives)(void)
+{
+ /* In the worst imaginable case, all the tracked modules could have
+ disappeared and been replaced with different ones. Hence: */
+ return 2 * VG_N_ASEGMENTS;
+}
+
+
+static
+void add_pri_text_and_data_segs ( AixSegment* tnew, AixSegment* dnew )
+{
+ Bool dExists = (dnew->end - dnew->start + 1) != 0;
+ aspacem_assert(tnew->kind == ASkMText);
+ aspacem_assert(dnew->kind == ASkMData);
+ if (dExists) {
+ aspacem_assert(tnew->sibling == dnew->start);
+ aspacem_assert(dnew->sibling == tnew->start);
+ add_asegment(&asegs_pri, tnew);
+ add_asegment(&asegs_pri, dnew);
+ } else {
+ aspacem_assert(tnew->sibling == 0);
+ add_asegment(&asegs_pri, tnew);
+ }
+}
+
+static
+void del_pri_text_and_data_segs ( AixSegment* told, AixSegment* dold )
+{
+ AixSegment fre;
+ Bool dExists = (dold->end - dold->start + 1) != 0;
+ aspacem_assert(told->kind == ASkMText);
+ aspacem_assert(dold->kind == ASkMData);
+ init_AixSegment( &fre );
+ fre.kind = ASkFree;
+ if (dExists) {
+ aspacem_assert(told->sibling == dold->start);
+ aspacem_assert(dold->sibling == told->start);
+ fre.start = told->start;
+ fre.end = told->end;
+ add_asegment(&asegs_pri, &fre);
+ fre.start = dold->start;
+ fre.end = dold->end;
+ add_asegment(&asegs_pri, &fre);
+ } else {
+ aspacem_assert(told->sibling == 0);
+ fre.start = told->start;
+ fre.end = told->end;
+ add_asegment(&asegs_pri, &fre);
+ }
+}
+
+
+/* Tell aspacem that /proc/<pid>/map may have changed (eg following
+ __loadx) and so it should be re-read, and the code/data segment
+ list updated accordingly. The resulting array of AixCodeChangeSeg
+ directives are written to 'directives', and the number of entries
+ to *ndirectives. */
+
+void VG_(am_aix5_reread_procmap)
+ ( /*OUT*/AixCodeSegChange* directives, /*OUT*/Int* ndirectives )
+{
+ Int ixold, ixnew;
+ Bool done_old, done_new;
+ AixSegment *olds, *news;
+
+ /* First, read /proc/../map into asegs_tnew. Copy asegs_pri into
+ asegs_told, and remove everything except MData and MText, so as
+ to generate something we can sanely compare with asegs_tnew.
+ Walk asegs_told and asegs_tnew together, writing the differences
+ to 'directives', and modifying asegs_pri accordingly. */
+ parse_procselfmap( &asegs_tnew );
+ copy_asegments_d_s( &asegs_told, &asegs_pri );
+ knockout_non_module_segs( &asegs_told );
+
+ *ndirectives = 0;
+
+# define MODIFY_PRI(_dir, _asegs, _ixt, _acquire) \
+ do { \
+ Int _ixd; \
+ AixSegment *_segt, *_segd; \
+ AixSegment _segd_dummy; \
+ aspacem_assert(_ixt >= 0 && _ixt < _asegs.used); \
+ _segt = &_asegs.seg[_ixt]; \
+ aspacem_assert(_segt->kind == ASkMText); \
+ if (_segt->sibling) { \
+ _ixd = find_asegment_idx( &_asegs, _segt->sibling ); \
+ _segd = &_asegs.seg[_ixd]; \
+ aspacem_assert(_segd->kind == ASkMData); \
+ aspacem_assert(_segt->sibling == _segd->start); \
+ } else { \
+ init_AixSegment( &_segd_dummy ); \
+ _segd_dummy.kind = ASkMData; \
+ _segd_dummy.start = 1; \
+ _segd_dummy.end = 0; \
+ _segd = &_segd_dummy; \
+ } \
+ if (_segd != &_segd_dummy) \
+ aspacem_assert(_segd->sibling == _segt->start); \
+ \
+ (_dir).code_start = (_segt)->start; \
+ (_dir).code_len = (_segt)->end - (_segt)->start + 1; \
+ (_dir).data_start = (_segd)->start; \
+ (_dir).data_len = (_segd)->end - (_segd)->start + 1; \
+ (_dir).file_name = (_segt)->fname; \
+ (_dir).mem_name = (_segt)->mname; \
+ (_dir).is_mainexe = (_acquire) ? (_segt)->isMainExe : False; \
+ (_dir).acquire = (_acquire); \
+ \
+ if (_acquire) { \
+ add_pri_text_and_data_segs( _segt, _segd ); \
+ } else { \
+ del_pri_text_and_data_segs( _segt, _segd ); \
+ } \
+ } while (0)
+
+ ixold = 0; /* indexes asegs_told */
+ ixnew = 0; /* indexes asegs_tnew */
+
+ while (True) {
+
+ aspacem_assert(ixold >= 0 && ixold < asegs_told.used);
+ aspacem_assert(ixnew >= 0 && ixnew < asegs_tnew.used);
+
+ /* Advance ixold and ixnew to the next MText in their
+ respective arrays. */
+ while (ixold < asegs_told.used
+ && asegs_told.seg[ixold].kind != ASkMText) {
+ aspacem_assert(asegs_told.seg[ixold].kind == ASkFree
+ || asegs_told.seg[ixold].kind == ASkMData);
+ ixold++;
+ }
+ while (ixnew < asegs_tnew.used
+ && asegs_tnew.seg[ixnew].kind != ASkMText) {
+ aspacem_assert(asegs_tnew.seg[ixnew].kind == ASkFree
+ || asegs_tnew.seg[ixnew].kind == ASkMData);
+ ixnew++;
+ }
+
+ aspacem_assert(ixold >= 0 && ixold <= asegs_told.used);
+ aspacem_assert(ixnew >= 0 && ixnew <= asegs_tnew.used);
+
+ done_old = ixold == asegs_told.used;
+ done_new = ixnew == asegs_tnew.used;
+
+ if (done_old && done_new)
+ goto both_done;
+ if (done_old && !done_new)
+ goto finishup_new;
+ if (done_new && !done_old)
+ goto finishup_old;
+
+ olds = &asegs_told.seg[ixold];
+ news = &asegs_tnew.seg[ixnew];
+
+ aspacem_assert(olds->kind == ASkMText);
+ aspacem_assert(news->kind == ASkMText);
+
+ if (0) {
+ show_AixSegment(0,ixold,&asegs_told.seg[ixold]);
+ show_AixSegment(0,ixnew,&asegs_tnew.seg[ixnew]);
+ VG_(debugLog)(0, "aspacem", "\n");
+ }
+
+   /* Here, if olds->start < news->start, then the old sequence has
+      an entry which the new one doesn't, so a module has been
+      unloaded.  If news->start < olds->start then the new sequence
+      has a module the old one doesn't, so a module has been
+      loaded.  If news->start == olds->start then the module is
+      unchanged.  Except, we should check a bit more carefully in
+      the case where the start addresses are equal. */
+ if (olds->start == news->start) {
+ if (olds->start == news->start
+ && olds->end == news->end
+ && olds->fname == news->fname
+ && olds->mname == news->mname
+ && olds->sibling == news->sibling
+ && olds->isMainExe == news->isMainExe) {
+ /* really identical, do nothing */
+ } else {
+ /* Dubious; mark it as an unload of old and load of
+ new. */
+ MODIFY_PRI(directives[*ndirectives], asegs_told, ixold, False);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ MODIFY_PRI(directives[*ndirectives], asegs_tnew, ixnew, True);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ }
+ ixold++;
+ ixnew++;
+ continue;
+ }
+
+ if (olds->start < news->start) {
+ /* discard olds */
+ MODIFY_PRI(directives[*ndirectives], asegs_told, ixold, False);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ ixold++;
+ continue;
+ }
+
+ if (news->start < olds->start) {
+ /* acquire news */
+ MODIFY_PRI(directives[*ndirectives], asegs_tnew, ixnew, True);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ ixnew++;
+ continue;
+ }
+ /* NOTREACHED */
+ aspacem_assert(0);
+ }
+
+ finishup_new:
+ olds = NULL;
+ aspacem_assert(ixold == asegs_told.used);
+ aspacem_assert(ixnew < asegs_tnew.used);
+ while (ixnew < asegs_tnew.used) {
+ news = &asegs_tnew.seg[ixnew];
+ aspacem_assert(news->kind == ASkMText || news->kind == ASkMData
+ || news->kind == ASkFree);
+ if (news->kind == ASkMText) {
+ MODIFY_PRI(directives[*ndirectives], asegs_tnew, ixnew, True);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ }
+ ixnew++;
+ }
+ goto both_done;
+
+ finishup_old:
+ news = NULL;
+ aspacem_assert(ixnew == asegs_tnew.used);
+ aspacem_assert(ixold < asegs_told.used);
+ while (ixold < asegs_told.used) {
+ olds = &asegs_told.seg[ixold];
+ aspacem_assert(olds->kind == ASkMText || olds->kind == ASkMData
+ || olds->kind == ASkFree);
+ if (olds->kind == ASkMText) {
+ MODIFY_PRI(directives[*ndirectives], asegs_told, ixold, False);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ }
+ ixold++;
+ }
+ goto both_done;
+
+ both_done:
+ aspacem_assert(ixold == asegs_told.used);
+ aspacem_assert(ixnew == asegs_tnew.used);
+
+ asegs_tnew.used = 0;
+ asegs_told.used = 0;
+
+ aspacem_assert( sane_AixSegments(&asegs_pri) );
+
+# undef MODIFY_PRI
+}
+
+
+/* Set the initial stack segment. Contains kludgery. Also take the
+ opportunity to create fake segs for the millicode areas. */
+
+void VG_(am_aix5_set_initial_client_sp)( Addr sp )
+{
+   /* One-shot guard: this must be called exactly once per run. */
+   static Bool done = False;
+   AixSegment seg;
+   Word n_fake_stack_pages;
+   Word m1 = 1048576;   /* 1 MiB; pads --main-stacksize in the calc below */
+
+   aspacem_assert(!done);
+   done = True;
+
+   /* We are given the initial client SP (that of the root thread).
+      Already on the stack are argv and env.  How far up does it
+      extend?  We assume to the next 64k boundary.  How far down does
+      it extend?  We assume N_FAKE_STACK_PAGES small pages - by
+      default 16M.  Establish those limits and add an AnonC rwx
+      segment. */
+
+   /* The 64k boundary is "justified" as follows.  On 32-bit AIX 5.3,
+      a typical initial SP is 0x2FF22xxx, but the accessible (rw) area
+      beyond that extends up to 0x2FF2FFFF - the next 64k boundary.
+      In 64-bit mode, a typical initial SP might be
+      0xFFF'FFFF'FFFF'E920, and the accessible area extends to
+      0xFFF'FFFF'FFFF'FFFF.  So in both cases, (64k roundup of sp) - 1
+      gives the end of the accessible area. */
+   VG_(debugLog)(1,"aspacem", "aix5_set_initial_client_sp( %p )\n",
+                 (void*)sp);
+
+   /* Build the client stack segment: anonymous, rwx. */
+   init_AixSegment( &seg );
+   seg.kind = ASkAnonC;
+   seg.hasR = seg.hasW = seg.hasX = True;
+
+   if (sizeof(void*) == 4
+       && ((sp & 0xFFFF0000) == 0x2FF20000
+           || (sp & 0xFFFF0000) == 0x2FF10000)) {
+      /* Gaaah.  Special-case 32-bit mode: pin the top to the known
+         accessible limit rather than trusting the 64k roundup. */
+      seg.end = 0x2FF2FFFF;
+   } else {
+      seg.end = AM_64K_ROUNDUP(sp) - 1;
+   }
+
+   /* Size the fake stack: at least the compiled-in minimum, grown to
+      cover --main-stacksize (plus 1 MiB of slack) if that is larger. */
+   n_fake_stack_pages = N_FAKE_STACK_PAGES_MIN;
+   if (VG_(clo_main_stacksize) > 0
+       && ((m1+VG_(clo_main_stacksize)) / VKI_PAGE_SIZE) > n_fake_stack_pages) {
+      n_fake_stack_pages = (m1+VG_(clo_main_stacksize)) / VKI_PAGE_SIZE;
+   }
+   if (n_fake_stack_pages > N_FAKE_STACK_PAGES_MAX) {
+      /* Allocation of the stack failed.  We have to stop. */
+      VG_(debugLog)(
+         0, "aspacem",
+         "valgrind: "
+         "I failed to allocate space for the application's stack.\n");
+      VG_(debugLog)(
+         0, "aspacem",
+         "valgrind: "
+         "This may be the result of a very large --max-stackframe=\n");
+      VG_(debugLog)(
+         0, "aspacem",
+         "valgrind: "
+         "setting.  Cannot continue.  Sorry.\n\n");
+      ML_(am_exit)(0);
+   }
+
+   /* Stack grows downwards from seg.end. */
+   seg.start = seg.end+1 - n_fake_stack_pages * VKI_PAGE_SIZE;
+
+   VG_(debugLog)(1,"aspacem", "aix5_set_initial_client_sp: stack seg:\n");
+   show_AixSegment(1,0, &seg);
+   add_asegment( &asegs_pri, &seg );
+
+   /* First fake segment for the millicode area (r-x, client). */
+   init_AixSegment( &seg );
+   seg.kind  = ASkAnonC;
+   seg.hasR  = seg.hasX = True;
+   seg.start = MAGIC_PAGES_1_BASE;
+   seg.end   = MAGIC_PAGES_1_BASE + MAGIC_PAGES_1_SIZE - 1;
+   VG_(debugLog)(1,"aspacem", "am_aix5_set_initial_client_sp: FAKE1 seg:\n");
+   show_AixSegment(1,0, &seg);
+   add_asegment( &asegs_pri, &seg );
+
+   /* Second fake millicode segment, same treatment. */
+   init_AixSegment( &seg );
+   seg.kind  = ASkAnonC;
+   seg.hasR  = seg.hasX = True;
+   seg.start = MAGIC_PAGES_2_BASE;
+   seg.end   = MAGIC_PAGES_2_BASE + MAGIC_PAGES_2_SIZE - 1;
+   VG_(debugLog)(1,"aspacem", "am_aix5_set_initial_client_sp: FAKE2 seg:\n");
+   show_AixSegment(1,0, &seg);
+   add_asegment( &asegs_pri, &seg );
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Getting segment-starts. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Print out the segment array (debugging only!). */
+/* Dump the primary segment array to the debug log at 'logLevel';
+   'who' is an arbitrary caller tag included in the output. */
+void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
+{
+   show_AixSegments( logLevel, who, &asegs_pri );
+}
+
+/* Get the filename corresponding to this segment, if known and if it
+ has one. The returned name's storage cannot be assumed to be
+ persistent, so the caller should immediately copy the name
+ elsewhere. On AIX5, we don't know what this is (in general)
+ so just return NULL. */
+/* On AIX5 the filename for an NSegment is not tracked (in general),
+   so this always returns NULL; 'seg' is deliberately unused. */
+HChar* VG_(am_get_filename)( NSegment const* seg )
+{
+   return NULL;
+}
+
+/* Collect up the start addresses of all non-free, non-resvn segments.
+ The interface is a bit strange in order to avoid potential
+ segment-creation races caused by dynamic allocation of the result
+ buffer *starts.
+
+ The function first computes how many entries in the result
+ buffer *starts will be needed. If this number <= nStarts,
+ they are placed in starts[0..], and the number is returned.
+ If nStarts is not large enough, nothing is written to
+ starts[0..], and the negation of the size is returned.
+
+ Correct use of this function may mean calling it multiple times in
+ order to establish a suitably-sized buffer. */
+
+/* Collect the start addresses of all segments except Free and
+   PreAlloc ones.  Two-pass: first count, then fill.  If the count
+   exceeds nStarts, writes nothing and returns the negated count so
+   the caller can retry with a bigger buffer. */
+Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
+{
+   Int i, j, nSegs;
+
+   /* don't pass dumbass arguments */
+   aspacem_assert(nStarts >= 0);
+
+   /* Pass 1: count qualifying segments. */
+   nSegs = 0;
+   for (i = 0; i < asegs_pri.used; i++) {
+      if (asegs_pri.seg[i].kind == ASkFree
+          || asegs_pri.seg[i].kind == ASkPreAlloc)
+         continue;
+      nSegs++;
+   }
+
+   if (nSegs > nStarts) {
+      /* The buffer isn't big enough.  Tell the caller how big it needs
+         to be. */
+      return -nSegs;
+   }
+
+   /* There's enough space.  So write into the result buffer. */
+   aspacem_assert(nSegs <= nStarts);
+
+   /* Pass 2: same filter, record the start addresses. */
+   j = 0;
+   for (i = 0; i < asegs_pri.used; i++) {
+      if (asegs_pri.seg[i].kind == ASkFree
+          || asegs_pri.seg[i].kind == ASkPreAlloc)
+         continue;
+      starts[j++] = asegs_pri.seg[i].start;
+   }
+
+   aspacem_assert(j == nSegs); /* this should not fail */
+   return nSegs;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Sanity checking and preening of the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Kernel-vs-segment-array sync check: not implementable on AIX5
+   (no /proc/self/maps equivalent is consulted here), so it is a
+   no-op that always reports "not checked" via False. */
+Bool VG_(am_do_sync_check) ( const HChar* fn,
+                             const HChar* file, Int line )
+{
+   /* There's nothing we can do here; just return a dummy value. */
+   return False; /* placate gcc */
+}
+
+/* Hook to allow sanity checks to be done from aspacemgr-common.c. */
+/* Hook to allow sanity checks to be done from aspacemgr-common.c.
+   Asserts that the primary segment array passes its invariants. */
+void ML_(am_do_sanity_check)( void )
+{
+   Bool ok = sane_AixSegments( &asegs_pri );
+   aspacem_assert(ok);
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Finding segments. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Finds the segment containing 'a'. Only returns file/anon/resvn
+ segments. On AIX5 this is pretty bogus; we fake up an entry as
+ best we can by snooping round for useful information in
+ asegs_pri. */
+
+/* Find the segment containing 'a' and present it as a (faked-up)
+   NSegment.  Returns NULL for Free/PreAlloc areas.  The result is a
+   pointer to a static buffer, overwritten on every call: callers
+   must copy out anything they need before calling again. */
+NSegment const* VG_(am_find_nsegment) ( Addr a )
+{
+   Int i;
+   AixSegment* aseg;
+   static NSegment bogus;   /* single shared result buffer */
+
+   /* Fill in default info. */
+   bogus.kind = SkAnonC;
+   bogus.start = 0;
+   bogus.end = 0;
+   bogus.smode = SmFixed;
+   bogus.dev = 0;
+   bogus.ino = 0;
+   bogus.mode = 0;
+   bogus.offset = 0;
+   bogus.fnIdx = -1;
+   bogus.hasR = bogus.hasW = bogus.hasX = False;
+   bogus.hasT = False;
+   bogus.isCH = False;
+   bogus.mark = False;
+
+   /* Go look for it in the segment table. */
+   i = find_asegment_idx( &asegs_pri, a );
+   /* BUGFIX: was 'i <= asegs_pri.used', which would permit i == used
+      and hence an out-of-bounds read of asegs_pri.seg[i] just below.
+      The segment array covers the whole address space, so the index
+      must be strictly in range. */
+   aspacem_assert(i >= 0 && i < asegs_pri.used);
+
+   aseg = &asegs_pri.seg[i];
+   if (aseg->kind == ASkFree || aseg->kind == ASkPreAlloc)
+      return NULL;
+
+   bogus.start = aseg->start;
+   bogus.end = aseg->end;
+
+   /* Refine: map each AixSegment kind onto the closest NSegment kind
+      and permissions. */
+   switch (aseg->kind) {
+      case ASkMText:
+         bogus.kind = SkAnonC; /* hmm, pretty darn bogus */
+         bogus.hasR = bogus.hasX = True;
+         break;
+      case ASkMData:
+         bogus.kind = SkAnonC; /* hmm, pretty darn bogus */
+         bogus.hasR = bogus.hasW = True;
+         break;
+      case ASkShmemC:
+         bogus.kind = SkShmC;
+         bogus.hasR = aseg->hasR;
+         bogus.hasW = aseg->hasW;
+         bogus.hasX = aseg->hasX;
+         break;
+      case ASkAnonC:
+         bogus.kind = SkAnonC;
+         bogus.hasR = aseg->hasR;
+         bogus.hasW = aseg->hasW;
+         bogus.hasX = aseg->hasX;
+         bogus.isCH = aseg->isCH;
+         break;
+      case ASkAnonV:
+         bogus.kind = SkAnonV;
+         bogus.hasR = aseg->hasR;
+         bogus.hasW = aseg->hasW;
+         bogus.hasX = aseg->hasX;
+         break;
+      case ASkFileV:
+         bogus.kind = SkFileV;
+         bogus.hasR = aseg->hasR;
+         bogus.hasW = aseg->hasW;
+         bogus.hasX = aseg->hasX;
+         bogus.offset = aseg->offset;
+         break;
+      default:
+         aspacem_assert(0);
+   }
+
+   return &bogus;
+}
+
+
+/* Find the next segment along from 'here', if it is a file/anon/resvn
+ segment. */
+/* Find the next segment along from 'here'.  Unimplemented on AIX5:
+   barfs (does not return) if ever called. */
+NSegment const* VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
+{
+   ML_(am_barf)("unimplemented: VG_(am_next_nsegment)");
+   return NULL; /* placate gcc */
+}
+
+
+/* Trivial fn: return the total amount of space in anonymous mappings,
+ both for V and the client. Is used for printing stats in
+ out-of-memory messages. */
+/* Sum the sizes of all anonymous mappings (AnonC and AnonV) in the
+   primary segment array.  Used for out-of-memory diagnostics. */
+ULong VG_(am_get_anonsize_total)( void )
+{
+   Int i;
+   ULong total = 0;
+   for (i = 0; i < asegs_pri.used; i++) {
+      if (asegs_pri.seg[i].kind == ASkAnonC
+          || asegs_pri.seg[i].kind == ASkAnonV) {
+         /* segment ranges are inclusive, hence the +1 */
+         total += (ULong)asegs_pri.seg[i].end
+                  - (ULong)asegs_pri.seg[i].start + 1ULL;
+      }
+   }
+   return total;
+}
+
+
+/* Test if a piece of memory is addressable by the client with at
+ least the "prot" protection permissions by examining the underlying
+ segments. */
+/* Test whether [start, start+len) is client-addressable with at
+   least permissions 'prot', by consulting the faked-up NSegment for
+   'start'.  NOTE(review): the asserts require the whole range to lie
+   within one segment -- presumably callers guarantee that; a range
+   spanning two segments would assert rather than return False. */
+Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
+                                  UInt prot )
+{
+   NSegment const * const fake = VG_(am_find_nsegment)(start);
+   if (!fake)
+      return False;
+   aspacem_assert(fake->start <= start);
+   aspacem_assert(start + len - 1 <= fake->end);
+   /* V-only areas are never valid for the client. */
+   if (fake->kind == SkAnonV || fake->kind == SkFileV)
+      return False;
+   if ((prot & VKI_PROT_READ) && !fake->hasR)
+      return False;
+   if ((prot & VKI_PROT_WRITE) && !fake->hasW)
+      return False;
+   if ((prot & VKI_PROT_EXEC) && !fake->hasX)
+      return False;
+   return True;
+}
+
+/* Variant of VG_(am_is_valid_for_client) which allows free areas to
+ be considered part of the client's addressable space. It also
+ considers reservations to be allowable, since from the client's
+ point of view they don't exist. */
+/* Variant of VG_(am_is_valid_for_client) that would also accept free
+   areas and reservations.  Unimplemented on AIX5: barfs (does not
+   return) if ever called. */
+Bool VG_(am_is_valid_for_client_or_free_or_resvn)
+   ( Addr start, SizeT len, UInt prot )
+{
+   ML_(am_barf)("unimplemented: "
+                "VG_(am_is_valid_for_client_or_free_or_resvn)");
+   /*NOTREACHED*/
+   return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Startup, including reading /proc/self/maps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Initialise the address space manager, setting up the initial
+ segment list, and reading /proc/self/maps into it. This must
+ be called before any other function.
+
+ Takes a pointer to the SP at the time V gained control. This is
+ taken to be the highest usable address (more or less). Based on
+ that (and general consultation of tea leaves, etc) return a
+ suggested end address for the client's stack. */
+
+Addr VG_(am_startup) ( Addr sp_at_startup )
+{
+   /* The address space manager assumes Word/Addr/SizeT/SSizeT are all
+      pointer-sized; fail loudly on any platform where that's false. */
+   aspacem_assert(sizeof(Word) == sizeof(void*));
+   aspacem_assert(sizeof(Addr) == sizeof(void*));
+   aspacem_assert(sizeof(SizeT) == sizeof(void*));
+   aspacem_assert(sizeof(SSizeT) == sizeof(void*));
+
+   /* Empty the two scratch arrays used by reread_procmap. */
+   asegs_tnew.used = 0;
+   asegs_told.used = 0;
+
+   /* NOTE(review): 'used' is set before init_AixSegments --
+      presumably the initialiser expects one (Free) entry to exist;
+      confirm against init_AixSegments. */
+   asegs_pri.used = 1;
+   init_AixSegments( &asegs_pri );
+   aspacem_assert( sane_AixSegments(&asegs_pri) );
+
+   if (0)
+      VG_(am_show_nsegments)(0,"AFTER VG_(am_startup)");
+
+   /* We do not make an initial read of /proc/../map since doing so
+      would leave us without a way to communicate the results to a
+      caller.  Hence we expect that the caller (m_main) will call
+      VG_(am_aix5_reread_procmap) soon after this call so as to get
+      the initial code/data segments recorded. */
+
+   /* Return value is irrelevant since we don't lay out the
+      client's stack; it is already done. */
+   return 0;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Preallocation (acquiring space from sbrk). ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Raw sbrk(delta) syscall, with no notification of the segment
+   array.  Requires the AIX5 sbrk syscall number to be known. */
+static
+SysRes local_do_sbrk_NO_NOTIFY( Word delta )
+{
+   SysRes res;
+   aspacem_assert(__NR_AIX5_sbrk != __NR_AIX5_UNKNOWN);
+   res = VG_(do_syscall1)(__NR_AIX5_sbrk, (UWord)delta);
+   /* kernel produces (-1, VKI_ENOMEM) on failure.  I think that's
+      ok. */
+   return res;
+}
+
+
+/* Find the ix of a prealloc section containing at least req_sz bytes,
+ or -1 if not found. Uses best-fit. */
+
+/* Find the index of a PreAlloc segment holding at least req_sz
+   bytes, or -1 if none exists.  Best-fit: among all big-enough
+   candidates, the smallest is chosen. */
+static Int find_prealloc_idx ( SizeT req_sz )
+{
+   SizeT best_sz, this_sz;
+   Int best_ix, i;
+   aspacem_assert(sizeof(SizeT) == sizeof(Addr));
+   aspacem_assert(req_sz > 0);
+   aspacem_assert(AM_IS_4K_ALIGNED(req_sz));
+
+   /* Addr_MAX acts as the "no candidate yet" sentinel. */
+   best_sz = Addr_MAX;
+   best_ix = -1;
+
+   for (i = 0; i < asegs_pri.used; i++) {
+      AixSegment* s = &asegs_pri.seg[i];
+      if (s->kind != ASkPreAlloc)
+         continue;
+      /* inclusive range, hence +1 */
+      this_sz
+         = s->end + 1 - s->start;
+      aspacem_assert(this_sz > 0);
+      aspacem_assert(AM_IS_4K_ALIGNED(this_sz));
+      if (this_sz >= req_sz && this_sz < best_sz) {
+         best_sz = this_sz;
+         best_ix = i;
+      }
+   }
+
+   return best_ix;
+}
+
+
+/* Create a new prealloc section containing req_sz bytes. Returns
+ False if failed, True on success. */
+
+/* Create a new PreAlloc segment of req_sz bytes by advancing the
+   brk: align the current brk to 4K, then grab req_sz bytes and
+   record them in the segment array.  Returns True on success. */
+static Bool new_prealloc ( SizeT req_sz )
+{
+   SysRes sres;
+   AixSegment seg;
+   Addr start;
+   SSizeT delta;
+   HChar* why = NULL;
+
+   aspacem_assert(req_sz > 0);
+   aspacem_assert(AM_IS_4K_ALIGNED(req_sz));
+
+   /* m_syswrap may have decided that it's not currently safe to allow
+      allocations from sbrk-world.  If so, we have to fail. */
+   /* NOTE(review): the '0 &&' disables this guard entirely --
+      presumably deliberate, but confirm against m_syswrap's use of
+      VG_(am_aix5_sbrk_allowed). */
+   if (0 && !VG_(am_aix5_sbrk_allowed)) {
+      why = "sbrk disallowed";
+      goto fail;
+   }
+
+   /* Get the current limit. */
+   sres = local_do_sbrk_NO_NOTIFY(0);
+   if (sres.isError) {
+      why = "initial sbrk failed";
+      goto fail;
+   }
+
+   /* Get it page aligned */
+   delta = AM_4K_ROUNDUP(sres.res) - sres.res;
+   aspacem_assert(delta >= 0 && delta < AM_4K_PAGESZ);
+   if (delta > 0) {
+      sres = local_do_sbrk_NO_NOTIFY(delta);
+      if (sres.isError) {
+         why = "aligning sbrk failed";
+         goto fail;
+      }
+   }
+
+   /* Now the brk is aligned.  Try to acquire the block. */
+   /* NOTE(review): this failure path returns False without logging a
+      'why', unlike every other failure here -- confirm intentional. */
+   sres = local_do_sbrk_NO_NOTIFY(0);
+   if (sres.isError)
+      return False;
+   start = sres.res;
+   aspacem_assert( AM_IS_4K_ALIGNED( start ));
+
+   sres = local_do_sbrk_NO_NOTIFY( req_sz );
+   if (sres.isError) {
+      why = "main sbrk failed";
+      goto fail;
+   }
+
+   /* If this fails, the kernel is acting strange. */
+   aspacem_assert( sres.res == start );
+
+   /* Record the freshly acquired block as a PreAlloc segment. */
+   init_AixSegment( &seg );
+   seg.start = start;
+   seg.end = start + req_sz - 1;
+   seg.kind = ASkPreAlloc;
+   seg.hasR = seg.hasW = seg.hasX = True; /* presumably */
+   add_asegment( &asegs_pri, &seg );
+
+   VG_(debugLog)(
+      1, "aspacem", "new_prealloc: SUCCESS at 0x%llx size %lld\n",
+      (ULong)start, (ULong)req_sz
+   );
+   return True;
+
+  fail:
+   VG_(debugLog)(1, "aspacem", "new_prealloc: FAILED: %s\n", why);
+   return False;
+}
+
+
+/* Find the ix of a prealloc section capable of holding a block of
+ size req_sz. If none exists, try to create one first. Returns -1
+ on failure. */
+
+/* Find a PreAlloc segment able to hold req_sz bytes, creating one
+   (with AM_PREALLOC_EXTRA slack for future requests) if necessary.
+   Returns the segment index, or -1 if allocation failed. */
+static Int find_or_create_prealloc_idx ( SizeT req_sz )
+{
+   Int ix;
+   SizeT req_szX;
+   Bool alloc_ok;
+
+   if (0)
+      VG_(debugLog)(0, "zz", " find_or_create_prealloc_idx ( %lu )\n",
+                    req_sz);
+
+   aspacem_assert(sizeof(SizeT) == sizeof(Addr));
+   aspacem_assert(req_sz > 0);
+   aspacem_assert(AM_IS_4K_ALIGNED(req_sz));
+
+   ix = find_prealloc_idx ( req_sz );
+   if (ix >= 0 && ix < asegs_pri.used)
+      return ix;
+
+   /* Not found.  We'll have to allocate one.  Allocate some extra at
+      the same time, so as to give a reservoir from which to satisfy
+      future requests. */
+   aspacem_assert(ix == -1);
+
+   req_szX = req_sz + AM_PREALLOC_EXTRA;
+   aspacem_assert(req_szX > 0);
+   aspacem_assert(AM_IS_4K_ALIGNED(req_szX));
+
+   alloc_ok = new_prealloc( req_szX );
+   if (!alloc_ok)
+      return -1; /* failed */
+
+   /* We should now be able to find it in the segment table. */
+   ix = find_prealloc_idx( req_sz );
+   aspacem_assert(ix >= 0 && ix < asegs_pri.used);
+   return ix;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The core query-notify mechanism. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Query aspacem to ask where a mapping should go. */
+
+/* Query aspacem to ask where a mapping should go.  Unimplemented on
+   AIX5: barfs (does not return) if ever called. */
+Addr VG_(am_get_advisory) ( MapRequest* req,
+                            Bool forClient,
+                            /*OUT*/Bool* ok )
+{
+   ML_(am_barf)("unimplemented: VG_(am_get_advisory)");
+   /*NOTREACHED*/
+   return 0; /* placate gcc -Wall */
+}
+
+
+/* Convenience wrapper for VG_(am_get_advisory) for client floating or
+ fixed requests. If start is zero, a floating request is issued; if
+ nonzero, a fixed request at that address is issued. Same comments
+ about return values apply. */
+
+/* Convenience wrapper for VG_(am_get_advisory).  Unimplemented on
+   AIX5: barfs (does not return) if ever called. */
+Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
+                                          /*OUT*/Bool* ok )
+{
+   ML_(am_barf)("unimplemented: VG_(am_get_advisory_client_simple)");
+   /*NOTREACHED*/
+   return 0; /* placate gcc -Wall */
+}
+
+
+/* Notifies aspacem that the client completed an mmap successfully.
+ The segment array is updated accordingly. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+
+/* Record a successful client mmap of [a, a+len) with permissions
+   'prot'.  Always requests translation discard (conservative).
+   Note: 'fd', 'flags' and 'offset' are only used for logging; the
+   mapping is recorded as AnonC regardless of whether it was
+   file-backed (see XXX below). */
+Bool
+VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
+                            Int fd, Off64T offset )
+{
+   AixSegment seg;
+   Bool needDiscard;
+
+   /* zero-length notify is a no-op */
+   if (len == 0)
+      return False;
+
+   /* Discard is needed if any of the just-trashed range had T. */
+   needDiscard = True; /* conservative but safe */
+
+   init_AixSegment( &seg );
+   seg.kind = ASkAnonC; /* XXX bogus: could be a file */
+   seg.start = a;
+   seg.end = a + len - 1;
+   seg.hasR = toBool(prot & VKI_PROT_READ);
+   seg.hasW = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX = toBool(prot & VKI_PROT_EXEC);
+
+   if (0)
+      VG_(debugLog)(0,"aspacem","notify mmap ( %p, %ld, %ld, %ld )\n",
+                    (void*)a, len, (UWord)prot, (UWord)flags);
+
+   add_asegment( &asegs_pri, &seg );
+   AM_SANITY_CHECK("am_notify_client_mmap");
+   return needDiscard;
+}
+
+
+/* Notifies aspacem that the client completed a shmat successfully.
+ The segment array is updated accordingly. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+
+/* Record a successful client shmat of [a, a+len) with permissions
+   'prot' as a ShmemC segment.  Always requests translation discard
+   (paranoid). */
+Bool
+VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
+{
+   AixSegment seg;
+   init_AixSegment( &seg );
+   seg.kind = ASkShmemC;
+   seg.start = a;
+   seg.end = seg.start + len - 1;
+   seg.hasR = (prot & VKI_PROT_READ) ? True : False;
+   seg.hasW = (prot & VKI_PROT_WRITE) ? True : False;
+   seg.hasX = (prot & VKI_PROT_EXEC) ? True : False;
+   add_asegment( &asegs_pri, &seg );
+   AM_SANITY_CHECK("am_notify_client_shmat");
+   if (0) VG_(am_show_nsegments)(0, "after shmat");
+   return True; /* be paranoid */
+}
+
+
+/* Notifies aspacem that an mprotect was completed successfully. The
+ segment array is updated accordingly. Note, as with
+ VG_(am_notify_munmap), it is not the job of this function to reject
+ stupid mprotects, for example the client doing mprotect of
+ non-client areas. Such requests should be intercepted earlier, by
+ the syscall wrapper for mprotect. This function merely records
+ whatever it is told. If the returned Bool is True, the caller
+ should immediately discard translations from the specified address
+ range. */
+
+Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
+{
+   Int i, iLo, iHi;
+   Bool newR, newW, newX, needDiscard;
+
+   /* zero-length notify is a no-op */
+   if (len == 0)
+      return False;
+
+   newR = toBool(prot & VKI_PROT_READ);
+   newW = toBool(prot & VKI_PROT_WRITE);
+   newX = toBool(prot & VKI_PROT_EXEC);
+
+   /* Discard is needed if we're dumping X permission */
+   needDiscard = True; /* conservative but correct */
+
+   /* Split segments at the range boundaries so the permission change
+      applies to whole segments only. */
+   split_asegments_lo_and_hi( &asegs_pri, start, start+len-1, &iLo, &iHi );
+
+   /* NOTE(review): iLo/iHi were just returned by the split above;
+      this recompute appears redundant (harmless, possibly
+      defensive) -- confirm against split_asegments_lo_and_hi. */
+   iLo = find_asegment_idx(&asegs_pri, start);
+   iHi = find_asegment_idx(&asegs_pri, start + len - 1);
+
+   for (i = iLo; i <= iHi; i++) {
+      aspacem_assert(i >= 0 && i < asegs_pri.used);
+      /* Apply the permissions to all relevant segments. */
+      if (asegs_pri.seg[i].kind != ASkFree) {
+         asegs_pri.seg[i].hasR = newR;
+         asegs_pri.seg[i].hasW = newW;
+         asegs_pri.seg[i].hasX = newX;
+         aspacem_assert(sane_AixSegment(&asegs_pri.seg[i]));
+      }
+   }
+   if (0)
+      VG_(debugLog)(0,"aspacem","notify mprotect ( %p, %ld, %ld )\n",
+                    (void*)start, len, (UWord)prot);
+   /* Changing permissions could have made previously un-mergable
+      segments mergeable.  Therefore have to re-preen them. */
+   preen_asegments(&asegs_pri);
+   AM_SANITY_CHECK("am_notify_mprotect");
+   return needDiscard;
+}
+
+
+/* Notifies aspacem that an munmap completed successfully. The
+ segment array is updated accordingly. As with
+ VG_(am_notify_munmap), we merely record the given info, and don't
+ check it for sensibleness. If the returned Bool is True, the
+ caller should immediately discard translations from the specified
+ address range. */
+
+/* Record a successful munmap of [start, start+len) by overwriting
+   the range with a Free segment.  No sensibleness checking; always
+   requests translation discard (conservative). */
+Bool VG_(am_notify_munmap)( Addr start, SizeT len )
+{
+   Bool needDiscard = True; /* conservative but safe */
+   AixSegment seg;
+
+   /* zero-length notify is a no-op */
+   if (len == 0)
+      return False;
+
+   init_AixSegment( &seg );
+   seg.kind = ASkFree;
+   seg.start = start;
+   seg.end = start + len - 1;
+   add_asegment( &asegs_pri, &seg );
+   AM_SANITY_CHECK("am_notify_munmap");
+
+   return needDiscard;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Handling mappings which do not arise directly from the ---*/
+/*--- simulation of the client. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* --- --- --- map, unmap, protect --- --- --- */
+
+/* Map a file at a fixed address for the client, and update the
+ segment array accordingly. */
+
+/* Map a file at a fixed address for the client.  Unimplemented on
+   AIX5: barfs (does not return) if ever called. */
+SysRes VG_(am_mmap_file_fixed_client)
+     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
+{
+   SysRes r = {0,0};
+   ML_(am_barf)("unimplemented: VG_(am_mmap_file_fixed_client)");
+   /*NOTREACHED*/
+   return r;
+}
+
+
+/* Map anonymously at a fixed address for the client, and update
+ the segment array accordingly. */
+
+/* Map anonymously at a fixed address for the client.  Unimplemented
+   on AIX5: barfs (does not return) if ever called. */
+SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
+{
+   SysRes r = {0,0};
+   ML_(am_barf)("unimplemented: VG_(am_mmap_anon_fixed_client)");
+   /*NOTREACHED*/
+   return r;
+}
+
+
+/* Map anonymously at an unconstrained address for the client, and
+ update the segment array accordingly. */
+
+/* Map 'length' anonymous bytes at an unconstrained address for the
+   client and record the result as an AnonC segment.  Returns the
+   mmap result (address on success, error otherwise). */
+SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
+{
+   SysRes sres;
+   AixSegment seg;
+
+   /* Not allowable. */
+   if (length == 0)
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* AIX seems to demand fd == -1 in anonymous mappings.  hence: */
+   sres = VG_(am_do_mmap_NO_NOTIFY)(
+             0, length,
+             prot,
+             VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+             -1, 0
+          );
+
+   /* Only record the segment if the kernel actually mapped it. */
+   if (!sres.isError) {
+      init_AixSegment( &seg );
+      seg.kind = ASkAnonC;
+      seg.start = sres.res;
+      seg.end = seg.start + length - 1;
+      seg.hasR = toBool((prot & VKI_PROT_READ) > 0);
+      seg.hasW = toBool((prot & VKI_PROT_WRITE) > 0);
+      seg.hasX = toBool((prot & VKI_PROT_EXEC) > 0);
+      seg.fromP = False;   /* from mmap, not from a prealloc area */
+      add_asegment( &asegs_pri, &seg );
+      VG_(debugLog)(2, "aspacem", "new AnonC from mmap, size %lu\n",
+                       length );
+   }
+
+   return sres;
+}
+
+
+/* Similarly, acquire new address space for the client but with
+ considerable restrictions on what can be done with it: (1) the
+ actual protections may exceed those stated in 'prot', (2) the
+ area's protections cannot be later changed using any form of
+ mprotect, and (3) the area cannot be freed using any form of
+ munmap. On Linux this behaves the same as
+ VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
+ by using sbrk, so as to make use of large pages on AIX. */
+
+/* Acquire 'length' bytes for the client, preferring sbrk-derived
+   prealloc space (to exploit AIX large pages); falls back to
+   VG_(am_mmap_anon_float_client) if no prealloc space is available.
+   Space taken from prealloc is carved from the front of the chosen
+   PreAlloc segment and marked fromP. */
+SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
+{
+   Int ix;
+   SysRes sres;
+   AixSegment seg;
+   SizeT lenX = AM_4K_ROUNDUP(length);   /* prealloc granularity is 4K */
+
+   /* Not allowable. */
+   if (length == 0)
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* First see if we can get space from sbrk-world. */
+   ix = find_or_create_prealloc_idx ( lenX );
+   if (ix >= 0 && ix < asegs_pri.used) {
+      init_AixSegment( &seg );
+      seg.kind = ASkAnonC;
+      seg.start = asegs_pri.seg[ix].start;
+      seg.end = seg.start + lenX - 1;
+      seg.hasR = toBool((prot & VKI_PROT_READ) > 0);
+      seg.hasW = toBool((prot & VKI_PROT_WRITE) > 0);
+      seg.hasX = toBool((prot & VKI_PROT_EXEC) > 0);
+      seg.fromP = True;   /* remember origin for munmap handling */
+      add_asegment( &asegs_pri, &seg );
+      sres = VG_(mk_SysRes_Success)( seg.start );
+      VG_(debugLog)(2, "aspacem", "new AnonC from prealloc, size %lu\n",
+                       length );
+      return sres;
+   }
+
+   /* That didn't work out.  Try mmap-world instead. */
+   aspacem_assert(ix == -1);
+   return VG_(am_mmap_anon_float_client)( length, prot );
+}
+
+
+/* Map anonymously at an unconstrained address for V, and update the
+ segment array accordingly. This is fundamentally how V allocates
+ itself more address space when needed. */
+
+/* Map 'length' anonymous rwx bytes at an unconstrained address for
+   Valgrind itself and record the result as an AnonV segment.  This
+   is fundamentally how V acquires more address space when needed. */
+SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
+{
+   SysRes sres;
+   AixSegment seg;
+
+   /* Not allowable. */
+   if (length == 0)
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* AIX seems to demand fd == -1 in anonymous mappings.  hence: */
+   sres = VG_(am_do_mmap_NO_NOTIFY)(
+             0, length,
+             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
+             VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+             -1, 0
+          );
+
+   /* Only record the segment if the kernel actually mapped it. */
+   if (!sres.isError) {
+      init_AixSegment( &seg );
+      seg.kind = ASkAnonV;
+      seg.start = sres.res;
+      seg.end = seg.start + length - 1;
+      seg.hasR = seg.hasW = seg.hasX = True;
+      seg.fromP = False;   /* from mmap, not from a prealloc area */
+      add_asegment( &asegs_pri, &seg );
+      VG_(debugLog)(2, "aspacem", "new AnonV from mmap, size %lu\n",
+                       length );
+   }
+
+   return sres;
+}
+
+
+/* Same comments apply as per VG_(am_sbrk_anon_float_client). On
+ Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
+/* Acquire 'length' rwx bytes for Valgrind itself, preferring
+   sbrk-derived prealloc space; falls back to
+   VG_(am_mmap_anon_float_valgrind) if no prealloc space is
+   available.  Mirrors VG_(am_sbrk_anon_float_client) but records an
+   AnonV segment with fixed rwx permissions. */
+SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT length )
+{
+   Int ix;
+   SysRes sres;
+   AixSegment seg;
+   SizeT lenX = AM_4K_ROUNDUP(length);   /* prealloc granularity is 4K */
+
+   /* Not allowable. */
+   if (length == 0)
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* First see if we can get space from sbrk-world. */
+   ix = find_or_create_prealloc_idx ( lenX );
+   if (ix >= 0 && ix < asegs_pri.used) {
+      init_AixSegment( &seg );
+      seg.kind = ASkAnonV;
+      seg.start = asegs_pri.seg[ix].start;
+      seg.end = seg.start + lenX - 1;
+      seg.hasR = True;
+      seg.hasW = True;
+      seg.hasX = True;
+      seg.fromP = True;   /* remember origin for munmap handling */
+      add_asegment( &asegs_pri, &seg );
+      sres = VG_(mk_SysRes_Success)( seg.start );
+      VG_(debugLog)(2, "aspacem", "new AnonV from prealloc, size %lu\n",
+                       length );
+      return sres;
+   }
+
+   /* That didn't work out.  Try mmap-world instead. */
+   aspacem_assert(ix == -1);
+   return VG_(am_mmap_anon_float_valgrind)( length );
+}
+
+
+/* Really just a wrapper around VG_(am_sbrk_anon_float_valgrind). */
+
+/* Allocate 'size' bytes of shadow memory for tools: a thin wrapper
+   around VG_(am_sbrk_anon_float_valgrind), returning NULL on
+   failure instead of a SysRes error. */
+void* VG_(am_shadow_alloc)(SizeT size)
+{
+   SysRes sres = VG_(am_sbrk_anon_float_valgrind)( size );
+   return sres.isError ? NULL : (void*)sres.res;
+}
+
+
+/* Map a file at an unconstrained address for V, and update the
+ segment array accordingly. This is used by V for transiently
+ mapping in object files to read their debug info. */
+
+/* Map a file at an unconstrained address for Valgrind itself and
+   record the result as a FileV segment.  Used for transiently
+   mapping object files to read their debug info.  Requires a
+   page-aligned offset and nonzero length; returns the mmap result. */
+SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
+                                          Int fd, Off64T offset )
+{
+   SysRes sres;
+
+   /* Not allowable. */
+   if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   sres = VG_(am_do_mmap_NO_NOTIFY)(
+             0, length,
+             prot, VKI_MAP_PRIVATE,
+             fd, offset
+          );
+   /* Only record the segment if the kernel actually mapped it. */
+   if (!sres.isError) {
+      AixSegment seg;
+      init_AixSegment( &seg );
+      /* BUGFIX: was 'SkFileV', an NSegment-kind enumerator; the
+         AixSegment kind enum is ASk*, cf. the ASkFileV checks in
+         VG_(am_munmap_valgrind) and VG_(am_find_nsegment). */
+      seg.kind = ASkFileV;
+      seg.start = sres.res;
+      seg.end = seg.start + length - 1;
+      seg.hasR = toBool(prot & VKI_PROT_READ);
+      seg.hasW = toBool(prot & VKI_PROT_WRITE);
+      seg.hasX = toBool(prot & VKI_PROT_EXEC);
+      seg.fname = add_to_strtab("(FileV-float, unknown name)");
+      add_asegment( &asegs_pri, &seg );
+      aspacem_assert( sane_AixSegments( &asegs_pri ));
+   }
+   return sres;
+}
+
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for the client.
+ If *need_discard is True after a successful return, the caller
+ should immediately discard translations from the specified address
+ range. */
+
+/* Unmap an address range on behalf of the client.  Unimplemented on
+   AIX5: barfs (does not return) if ever called. */
+SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
+                              Addr start, SizeT len )
+{
+   SysRes r = {0,0};
+   ML_(am_barf)("unimplemented: VG_(am_munmap_client)");
+   /*NOTREACHED*/
+   return r;
+}
+
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for valgrind. */
+/* Also, if the specified range doesn't fall within a single segment,
+ it barfs. This simplifies the implementation; we shouldn't need to
+ deal with anything but the simplest cases. */
+
+SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
+{
+ AixSegment* seg;
+ AixSegment seg2;
+ Addr end;
+ SysRes sres;
+ Int ixS, ixE;
+ Bool debug = False;
+
+ if (debug)
+ VG_(debugLog)(0,"aspacem",
+ "am_munmap_valgrind(%p, %lu)\n", (void*)start, len);
+
+ if (len == 0)
+ return VG_(mk_SysRes_Success)(0);
+
+ /* We have to be a bit careful here. If the area being unmapped is
+ AnonV which originated from a preallocated area (hence from
+ sbrk-land) then we will have to return it to the preallocated
+ state, rather than unmapping it. */
+ end = start + len - 1;
+ aspacem_assert(start <= end); // else have wraparound?!
+
+ ixS = find_asegment_idx( &asegs_pri, start );
+ ixE = find_asegment_idx( &asegs_pri, end );
+
+ aspacem_assert(ixS >= 0 && ixS < asegs_pri.used);
+ aspacem_assert(ixE >= 0 && ixE < asegs_pri.used);
+
+ /* Preconditions: See comment at start of fn */
+ aspacem_assert(ixS == ixE);
+
+ /* For the segment S denoted by ixS:
+
+ - if S is AnonV from prealloc and S entirely within start .. end,
+ return it to prealloc
+
+ - if S is AnonV not from prealloc and S entirely within start .. end,
+ munmap it
+
+ - if S is FileV and S entirely within start .. end, munmap it
+
+ Otherwise, leave it alone (too complex to handle). In theory
+ this could cause a leak; in practice I don't think it will.
+ */
+ seg = &asegs_pri.seg[ixS];
+
+ if (debug)
+ show_AixSegment( 0, ixS, seg );
+
+ /* Invariants */
+ aspacem_assert(seg->start <= start);
+ aspacem_assert(end <= seg->end);
+
+ if (seg->kind == ASkFileV
+ || (seg->kind == ASkAnonV && (!seg->fromP))) {
+ if (debug)
+ VG_(debugLog)(0,"aspacem", "am_munmap_valgrind: !fromP: %p-%p\n",
+ (void*)start, (void*)end);
+ sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
+ if (sres.isError)
+ goto bad;
+ init_AixSegment( &seg2 );
+ seg2.start = start;
+ seg2.end = end;
+ seg2.kind = ASkFree;
+ add_asegment( &asegs_pri, &seg2 );
+ }
+ else
+ if (seg->kind == ASkAnonV && seg->fromP) {
+ if (debug)
+ VG_(debugLog)(0,"aspacem", "am_munmap_valgrind: fromP: %p-%p\n",
+ (void*)start, (void*)end);
+ init_AixSegment( &seg2 );
+ seg2.start = start;
+ seg2.end = end;
+ seg2.kind = ASkPreAlloc;
+ seg2.hasR = seg2.hasW = seg2.hasX = True;
+ add_asegment( &asegs_pri, &seg2 );
+ }
+ else {
+ /* shouldn't be asked to handle any other cases */
+ aspacem_assert(0);
+ }
+
+ aspacem_assert( sane_AixSegments( &asegs_pri ));
+ return VG_(mk_SysRes_Success)(0);
+
+ bad:
+ aspacem_assert( sane_AixSegments( &asegs_pri ));
+ return VG_(mk_SysRes_Error)(VKI_EINVAL);
+}
+
+
+/* Let (start,len) denote an area within a single Valgrind-owned
+ segment (anon or file). Change the ownership of [start, start+len)
+ to the client instead. Fails if (start,len) does not denote a
+ suitable segment. */
+
+Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
+{
+ return True;
+}
+
+
+/* 'seg' must be NULL or have been obtained from
+ VG_(am_find_nsegment), and still valid. If non-NULL, and if it
+ denotes a SkAnonC (anonymous client mapping) area, set the .isCH
+ (is-client-heap) flag for that area. Otherwise do nothing.
+ (Bizarre interface so that the same code works for both Linux and
+ AIX and does not impose inefficiencies on the Linux version.) */
+/* AIX: presumably this is a faked-up segment our VG_(am_find_segment)
+ came up with. So we have to find the corresponding AixSegment. */
+
+void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
+{
+ Int i;
+ if (seg == NULL)
+ return;
+ i = find_asegment_idx( &asegs_pri, seg->start );
+ aspacem_assert(i >= 0 && i < asegs_pri.used );
+ if (asegs_pri.seg[i].kind == ASkAnonC) {
+ asegs_pri.seg[i].isCH = True;
+ if (0)
+ VG_(debugLog)(0,"aspacem","set isCH for %p\n", (void*)seg->start );
+ } else {
+ aspacem_assert(asegs_pri.seg[i].isCH == False);
+ }
+}
+
+
+/* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
+ segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
+ segment. */
+/* AIX: we ignore these complexities by conservatively assuming that
+ all segments had translations taken from them. Hence we can safely
+ ignore this. */
+void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
+{
+}
+
+
+/* --- --- --- reservations --- --- --- */
+
+/* Create a reservation from START .. START+LENGTH-1, with the given
+ ShrinkMode. When checking whether the reservation can be created,
+ also ensure that at least abs(EXTRA) extra free bytes will remain
+ above (> 0) or below (< 0) the reservation.
+
+ The reservation will only be created if it, plus the extra-zone,
+ falls entirely within a single free segment. The returned Bool
+ indicates whether the creation succeeded. */
+
+Bool VG_(am_create_reservation) ( Addr start, SizeT length,
+ ShrinkMode smode, SSizeT extra )
+{
+ ML_(am_barf)("unimplemented: VG_(am_create_reservation)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/* Let SEG be an anonymous client mapping. This fn extends the
+ mapping by DELTA bytes, taking the space from a reservation section
+ which must be adjacent. If DELTA is positive, the segment is
+ extended forwards in the address space, and the reservation must be
+ the next one along. If DELTA is negative, the segment is extended
+ backwards in the address space and the reservation must be the
+ previous one. DELTA must be page aligned. abs(DELTA) must not
+ exceed the size of the reservation segment minus one page, that is,
+ the reservation segment after the operation must be at least one
+ page long. */
+
+Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
+ SSizeT delta )
+{
+ ML_(am_barf)("unimplemented: "
+ "VG_(am_extend_into_adjacent_reservation_client)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/* --- --- --- resizing/move a mapping --- --- --- */
+
+/* Let SEG be a client mapping (anonymous or file). This fn extends
+ the mapping forwards only by DELTA bytes, and trashes whatever was
+ in the new area. Fails if SEG is not a single client mapping or if
+ the new area is not accessible to the client. Fails if DELTA is
+ not page aligned. *seg is invalid after a successful return. If
+ *need_discard is True after a successful return, the caller should
+ immediately discard translations from the new area. */
+
+Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
+ NSegment* seg, SizeT delta )
+{
+ ML_(am_barf)("unimplemented: VG_(am_extend_map_client)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/* Remap the old address range to the new address range. Fails if any
+ parameter is not page aligned, if the either size is zero, if any
+ wraparound is implied, if the old address range does not fall
+ entirely within a single segment, if the new address range overlaps
+ with the old one, or if the old address range is not a valid client
+ mapping. If *need_discard is True after a successful return, the
+ caller should immediately discard translations from both specified
+ address ranges. */
+
+Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
+ Addr old_addr, SizeT old_len,
+ Addr new_addr, SizeT new_len )
+{
+ ML_(am_barf)("unimplemented: VG_(am_relocate_nooverlap_client)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- A simple parser for /proc/<pid>/map on AIX5. ---*/
+/*--- Almost completely independent of the stuff above. The ---*/
+/*--- only function it 'exports' to the code above this comment ---*/
+/*--- is parse_procselfmaps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+#include <sys/procfs.h>
+/* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+
+
+/* Size of a smallish table used to read /proc/<pid>/map entries. */
+#define M_APROCMAP_BUF 100000
+
+/* static ... to keep it out of the stack frame. */
+static HChar procmap_buf[M_APROCMAP_BUF];
+
+/* Records length of /proc/<pid>/map read into procmap_buf. */
+static Int buf_n_tot;
+
+/* Helper fns. */
+
+/* Get the contents of /proc/<pid>/map into a static buffer. If
+ there's a syntax error, it won't fit, or other failure, just
+ abort. */
+
+static void read_procselfmap_into_buf ( void )
+{
+ Char fname[50];
+ Int n_chunk;
+ SysRes fd;
+
+ ML_(am_sprintf)( fname, "/proc/%d/map", ML_(am_getpid)() );
+
+ /* Read the initial memory mapping from the /proc filesystem. */
+ fd = ML_(am_open)( fname, VKI_O_RDONLY, 0 );
+ if (fd.isError)
+ ML_(am_barf)("can't open /proc/<pid>/map");
+
+ buf_n_tot = 0;
+ do {
+ n_chunk = ML_(am_read)( fd.res, &procmap_buf[buf_n_tot],
+ M_APROCMAP_BUF - buf_n_tot );
+ buf_n_tot += n_chunk;
+ } while ( n_chunk > 0 && buf_n_tot < M_APROCMAP_BUF );
+
+ ML_(am_close)(fd.res);
+
+ if (buf_n_tot >= M_APROCMAP_BUF-5)
+ ML_(am_barf_toolow)("M_APROCMAP_BUF");
+ if (buf_n_tot == 0)
+ ML_(am_barf)("I/O error on /proc/<pid>/map");
+
+ procmap_buf[buf_n_tot] = 0;
+}
+
+
+/* /proc/<pid>/map appears to give out a non-absolute path name for
+ the main executable. Fortunately we can reliably identify the main
+ executable via the MA_MAINEXEC bit, and if we find the path is
+ non-absolute, replace it with /proc/<pid>/object/a.out instead.
+ AIX guarantees the latter is another name for the main
+ executable. */
+
+static HChar* kludge_exe_file_name ( HChar* file_name, prmap_t* map )
+{
+ static Int my_pid = -1;
+ static HChar a_out_name[64];
+ if (file_name == NULL)
+ return NULL;
+ if (file_name[0] != '/' && (map->pr_mflags & MA_MAINEXEC)) {
+ if (my_pid == -1)
+ my_pid = ML_(am_getpid)();
+ ML_(am_sprintf)(a_out_name, "/proc/%d/object/a.out", my_pid);
+ file_name = a_out_name;
+ }
+ return file_name;
+}
+
+
+
+/* Parse /proc/<pid>/map, copying the entries in it into an
+ AixSegments structure. Returns a properly formed AixSegments, with
+ ASkMText/ASkMData entries, with sibling pointers set up, and
+ ASkFree everywhere else.
+*/
+static void parse_procselfmap ( /*OUT*/AixSegments* segs )
+{
+ UChar rr, ww, xx, mm, ss;
+ prmap_t* map;
+ UChar* file_name;
+ UChar* member_name;
+ Bool show_map;
+ Int off, i, j;
+ AixSegment s;
+
+ const UInt valid_pr_mflags
+ = MA_MAINEXEC | MA_KERNTEXT | MA_READ | MA_WRITE
+ | MA_EXEC | MA_SHARED | MA_BREAK | MA_STACK;
+
+ segs->used = 1;
+ init_AixSegments(segs);
+ aspacem_assert( sane_AixSegments(segs) );
+
+ read_procselfmap_into_buf();
+
+ if (0)
+ VG_(debugLog)(0, "procselfmaps", "got %d bytes\n", buf_n_tot);
+
+ off = 0;
+ while (True) {
+
+ /* stay sane .. */
+ if (off + sizeof(prmap_t) > buf_n_tot)
+ break;
+
+ map = (prmap_t*)&procmap_buf[off];
+ off += sizeof(prmap_t);
+
+ /* When should we stop reading the array?
+ /usr/include/sys/procfs.h says that "Array entries continue
+ until an entry with a pr_size field of 0 and invalid
+ pr_mflags occurs." It unhelpfully fails to define what
+ "invalid" means here. However, the following test _seems_ to
+ work. */
+ if (map->pr_size == 0
+ && (map->pr_mflags & valid_pr_mflags) == 0)
+ break;
+
+ /* Ok, keep going, but ignore any zero-sized mappings: */
+ if (map->pr_size == 0)
+ continue;
+
+ mm = (map->pr_mflags & MA_MAINEXEC) > 0;
+ rr = (map->pr_mflags & MA_READ) > 0;
+ ww = (map->pr_mflags & MA_WRITE) > 0;
+ xx = (map->pr_mflags & MA_EXEC) > 0;
+ ss = (map->pr_mflags & MA_SHARED) > 0;
+
+ if (map->pr_pathoff > 0) {
+ file_name = &procmap_buf[map->pr_pathoff];
+ member_name = file_name + VG_(strlen)(file_name) + 1;
+ if (*member_name == 0)
+ member_name = NULL;
+ } else {
+ file_name = member_name = NULL;
+ }
+ file_name = kludge_exe_file_name( file_name, map );
+
+ /* Now file_name and member_name are NULL or ordinary strings.
+ Convert them to string-table resident strings. */
+ if (file_name)
+ file_name = add_to_strtab(file_name);
+ if (member_name)
+ member_name = add_to_strtab(member_name);
+
+ /* Create a suitable kind of segment. Initially we will start
+ with bogus sibling pointers, and allow ASkMData entries to
+ have file names, since we cannot assume anything about the
+ ordering of entries in the procmap file. In a second pass,
+ we will set up the sibling pointers based on those file
+ names, then remove the MData file names. */
+ init_AixSegment(&s);
+ show_map = False;
+ if (rr && (!ww) && xx) {
+ if (map->pr_size > 0) {
+ /* r-x segment; add bounds for a text area. */
+ s.kind = ASkMText;
+ s.start = (Addr)map->pr_vaddr;
+ s.end = (Addr)map->pr_vaddr + (Addr)map->pr_size - 1;
+ s.isMainExe = mm;
+ s.sibling = 0;
+ s.fname = file_name;
+ s.mname = member_name;
+ s.hasR = rr;
+ s.hasW = ww;
+ s.hasX = xx;
+ add_asegment(segs, &s);
+ }
+ }
+ else
+ if (rr && ww && (!xx)) {
+ if (map->pr_size > 0) {
+ /* rw- segment; add bounds for a data area. */
+ s.kind = ASkMData;
+ s.start = (Addr)map->pr_vaddr;
+ s.end = (Addr)map->pr_vaddr + (Addr)map->pr_size - 1;
+ /* Set a bogus non-zero sibling pointer, since sanity
+ checking will reject zero sibling pointers on MData.
+ It doesn't matter since the loops following this one
+ below fix up the sibling pointers. */
+ s.sibling = 1;
+ s.fname = file_name;
+ s.mname = member_name;
+ s.hasR = rr;
+ s.hasW = ww;
+ s.hasX = xx;
+ add_asegment(segs, &s);
+ }
+ }
+ else {
+ /* unclassifiable; we better complain. */
+ show_map = True;
+ VG_(debugLog)(0, "aspacem", "parse_procselfmap: unclassifiable:\n");
+ }
+
+ if (show_map)
+ VG_(debugLog)(1,"aspacem",
+ " %010llx-%010llx %c%c%c%c%c %s%s%s%s\n",
+ (ULong)map->pr_vaddr,
+ (ULong)map->pr_vaddr + (ULong)map->pr_size,
+ mm ? 'M' : '-',
+ rr ? 'r' : '-',
+ ww ? 'w' : '-',
+ xx ? 'x' : '-',
+ ss ? 'S' : '-',
+ file_name ? file_name : (UChar*)"(none)",
+ member_name ? "(" : "",
+ member_name ? member_name : (UChar*)"",
+ member_name ? ")" : ""
+ );
+
+ }
+
+ /* Set up sibling pointers. For each MData, find an MText with the
+ same file/member names, or complain. This is really ugly in
+ that it makes the process quadratic in the number of modules
+ mapped in, but I can't think of a (simple) better way. */
+
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind != ASkMData)
+ continue;
+ for (j = 0; j < segs->used; j++) {
+ if (segs->seg[j].kind == ASkMText
+ && segs->seg[j].fname == segs->seg[i].fname
+ && segs->seg[j].mname == segs->seg[i].mname)
+ break;
+ }
+ if (j == segs->used) {
+ VG_(debugLog)(0, "aspacem", "parse_procselfmap: "
+ "data segment with no associated text segment:\n");
+ VG_(debugLog)(0, "aspacem", "module = %s(%s)\n",
+ segs->seg[i].fname,
+ segs->seg[i].mname ? segs->seg[i].mname
+ : (UChar*)"(none)");
+ aspacem_assert(0);
+ }
+ aspacem_assert(j >= 0 && j < segs->used && j != i);
+ segs->seg[i].sibling = segs->seg[j].start;
+ }
+
+ /* (Almost) dually, for each MText, find an MData with same
+ file/member names, but don't complain if not present. */
+
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind != ASkMText)
+ continue;
+ for (j = 0; j < segs->used; j++) {
+ if (segs->seg[j].kind == ASkMData
+ && segs->seg[j].fname == segs->seg[i].fname
+ && segs->seg[j].mname == segs->seg[i].mname)
+ break;
+ }
+ if (j == segs->used) {
+ /* no corresponding MData found; harmless. */
+ } else {
+ aspacem_assert(j >= 0 && j < segs->used && j != i);
+ segs->seg[i].sibling = segs->seg[j].start;
+ }
+ }
+
+ /* Finally, get rid of fname/mname pointers on MDatas, so as to
+ adhere to the necessary representational invariants. */
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind == ASkMData){
+ segs->seg[i].fname = segs->seg[i].mname = NULL;
+ }
+ }
+
+ aspacem_assert( sane_AixSegments(segs) );
+ if (0)
+ show_AixSegments(0, "as read from procmap", segs);
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-common.c.svn-base b/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-common.c.svn-base
new file mode 100644
index 0000000..ca4dc76
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-common.c.svn-base
@@ -0,0 +1,386 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager: stuff common to all platforms ---*/
+/*--- ---*/
+/*--- m_aspacemgr-common.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+ ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
+ AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
+ ************************************************************* */
+
+#include "priv_aspacemgr.h"
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Stuff to make aspacem almost completely independent of ---*/
+/*--- the rest of Valgrind. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+//--------------------------------------------------------------
+// Simple assert and assert-like fns, which avoid dependence on
+// m_libcassert, and hence on the entire debug-info reader swamp
+
+__attribute__ ((noreturn))
+void ML_(am_exit)( Int status )
+{
+# if defined(VGO_linux)
+ (void)VG_(do_syscall1)(__NR_exit_group, status);
+# endif
+ (void)VG_(do_syscall1)(__NR_exit, status);
+ /* Why are we still alive here? */
+ /*NOTREACHED*/
+ *(volatile Int *)0 = 'x';
+ aspacem_assert(2+2 == 5);
+}
+
+void ML_(am_barf) ( HChar* what )
+{
+ VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
+ VG_(debugLog)(0, "aspacem", "Exiting now.\n");
+ ML_(am_exit)(1);
+}
+
+void ML_(am_barf_toolow) ( HChar* what )
+{
+ VG_(debugLog)(0, "aspacem",
+ "Valgrind: FATAL: %s is too low.\n", what);
+ VG_(debugLog)(0, "aspacem", " Increase it and rebuild. "
+ "Exiting now.\n");
+ ML_(am_exit)(1);
+}
+
+void ML_(am_assert_fail)( const HChar* expr,
+ const Char* file,
+ Int line,
+ const Char* fn )
+{
+ VG_(debugLog)(0, "aspacem",
+ "Valgrind: FATAL: aspacem assertion failed:\n");
+ VG_(debugLog)(0, "aspacem", " %s\n", expr);
+ VG_(debugLog)(0, "aspacem", " at %s:%d (%s)\n", file,line,fn);
+ VG_(debugLog)(0, "aspacem", "Exiting now.\n");
+ ML_(am_exit)(1);
+}
+
+Int ML_(am_getpid)( void )
+{
+ SysRes sres = VG_(do_syscall0)(__NR_getpid);
+ aspacem_assert(!sres.isError);
+ return sres.res;
+}
+
+
+//--------------------------------------------------------------
+// A simple sprintf implementation, so as to avoid dependence on
+// m_libcprint.
+
+static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
+{
+ HChar** aspacem_sprintf_ptr = p;
+ *(*aspacem_sprintf_ptr)++ = c;
+}
+
+static
+UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
+{
+ Int ret;
+ Char *aspacem_sprintf_ptr = buf;
+
+ ret = VG_(debugLog_vprintf)
+ ( local_add_to_aspacem_sprintf_buf,
+ &aspacem_sprintf_ptr, format, vargs );
+ local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);
+
+ return ret;
+}
+
+UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
+{
+ UInt ret;
+ va_list vargs;
+
+ va_start(vargs,format);
+ ret = local_vsprintf(buf, format, vargs);
+ va_end(vargs);
+
+ return ret;
+}
+
+
+//--------------------------------------------------------------
+// Direct access to a handful of syscalls. This avoids dependence on
+// m_libc*. THESE DO NOT UPDATE THE ANY aspacem-internal DATA
+// STRUCTURES (SEGMENT LISTS). DO NOT USE THEM UNLESS YOU KNOW WHAT
+// YOU ARE DOING.
+
+/* --- Pertaining to mappings --- */
+
+/* Note: this is VG_, not ML_. */
+SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
+ UInt flags, UInt fd, Off64T offset)
+{
+ SysRes res;
+ aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
+# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux)
+ /* mmap2 uses 4096 chunks even if actual page size is bigger. */
+ aspacem_assert((offset % 4096) == 0);
+ res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
+ prot, flags, fd, offset / 4096);
+# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
+ || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
+ prot, flags, fd, offset);
+# else
+# error Unknown platform
+# endif
+ return res;
+}
+
+static
+SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
+{
+ return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
+}
+
+SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
+{
+ return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
+}
+
+SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
+ Addr old_addr,
+ SizeT old_len,
+ SizeT new_len
+ )
+{
+ /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
+ new_len, WITHOUT moving it. If it can't be extended in place,
+ fail. */
+# if defined(VGO_linux)
+ return VG_(do_syscall5)(
+ __NR_mremap,
+ old_addr, old_len, new_len,
+ 0/*flags, meaning: must be at old_addr, else FAIL */,
+ 0/*new_addr, is ignored*/
+ );
+# elif defined(VGO_aix5)
+ ML_(am_barf)("ML_(am_do_extend_mapping_NO_NOTIFY) on AIX5");
+ /* NOTREACHED, but gcc doesn't understand that */
+ return VG_(mk_SysRes_Error)(0);
+# else
+# error Unknown OS
+# endif
+}
+
+SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
+ Addr old_addr, Addr old_len,
+ Addr new_addr, Addr new_len
+ )
+{
+ /* Move the mapping old_addr .. old_addr+old_len-1 to the new
+ location and with the new length. Only needs to handle the case
+ where the two areas do not overlap, neither length is zero, and
+ all args are page aligned. */
+# if defined(VGO_linux)
+ return VG_(do_syscall5)(
+ __NR_mremap,
+ old_addr, old_len, new_len,
+ VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
+ new_addr
+ );
+# elif defined(VGO_aix5)
+ ML_(am_barf)("ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY) on AIX5");
+ /* NOTREACHED, but gcc doesn't understand that */
+ return VG_(mk_SysRes_Error)(0);
+# else
+# error Unknown OS
+# endif
+}
+
+/* --- Pertaining to files --- */
+
+SysRes ML_(am_open) ( const Char* pathname, Int flags, Int mode )
+{
+ SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
+ return res;
+}
+
+Int ML_(am_read) ( Int fd, void* buf, Int count)
+{
+ SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
+ return res.isError ? -1 : res.res;
+}
+
+void ML_(am_close) ( Int fd )
+{
+ (void)VG_(do_syscall1)(__NR_close, fd);
+}
+
+Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
+{
+ SysRes res;
+ res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+ return res.isError ? -1 : res.res;
+}
+
+/* Get the dev, inode and mode info for a file descriptor, if
+ possible. Returns True on success. */
+Bool ML_(am_get_fd_d_i_m)( Int fd,
+ /*OUT*/ULong* dev,
+ /*OUT*/ULong* ino, /*OUT*/UInt* mode )
+{
+ SysRes res;
+ struct vki_stat buf;
+# if defined(VGO_linux) && defined(__NR_fstat64)
+ /* Try fstat64 first as it can cope with minor and major device
+ numbers outside the 0-255 range and it works properly for x86
+ binaries on amd64 systems where fstat seems to be broken. */
+ struct vki_stat64 buf64;
+ res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
+ if (!res.isError) {
+ *dev = (ULong)buf64.st_dev;
+ *ino = (ULong)buf64.st_ino;
+ *mode = (UInt) buf64.st_mode;
+ return True;
+ }
+# endif
+ res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
+ if (!res.isError) {
+ *dev = (ULong)buf.st_dev;
+ *ino = (ULong)buf.st_ino;
+ *mode = (UInt) buf.st_mode;
+ return True;
+ }
+ return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Manage stacks for Valgrind itself. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Allocate and initialise a VgStack (anonymous valgrind space).
+ Protect the stack active area and the guard areas appropriately.
+ Returns NULL on failure, else the address of the bottom of the
+ stack. On success, also sets *initial_sp to what the stack pointer
+ should be set to. */
+
+VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
+{
+ Int szB;
+ SysRes sres;
+ VgStack* stack;
+ UInt* p;
+ Int i;
+
+ /* Allocate the stack. */
+ szB = VG_STACK_GUARD_SZB
+ + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;
+
+ sres = VG_(am_mmap_anon_float_valgrind)( szB );
+ if (sres.isError)
+ return NULL;
+
+ stack = (VgStack*)sres.res;
+
+ aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(stack));
+
+ /* Protect the guard areas. */
+ sres = local_do_mprotect_NO_NOTIFY(
+ (Addr) &stack[0],
+ VG_STACK_GUARD_SZB, VKI_PROT_NONE
+ );
+ if (sres.isError) goto protect_failed;
+ VG_(am_notify_mprotect)(
+ (Addr) &stack->bytes[0],
+ VG_STACK_GUARD_SZB, VKI_PROT_NONE
+ );
+
+ sres = local_do_mprotect_NO_NOTIFY(
+ (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
+ VG_STACK_GUARD_SZB, VKI_PROT_NONE
+ );
+ if (sres.isError) goto protect_failed;
+ VG_(am_notify_mprotect)(
+ (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
+ VG_STACK_GUARD_SZB, VKI_PROT_NONE
+ );
+
+ /* Looks good. Fill the active area with junk so we can later
+ tell how much got used. */
+
+ p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
+ for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
+ p[i] = 0xDEADBEEF;
+
+ *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
+ *initial_sp -= 8;
+ *initial_sp &= ~((Addr)0xF);
+
+ VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
+ (ULong)(Addr)stack, szB);
+ ML_(am_do_sanity_check)();
+ return stack;
+
+ protect_failed:
+ /* The stack was allocated, but we can't protect it. Unmap it and
+ return NULL (failure). */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
+ ML_(am_do_sanity_check)();
+ return NULL;
+}
+
+
+/* Figure out how many bytes of the stack's active area have not
+ been used. Used for estimating if we are close to overflowing it. */
+
+Int VG_(am_get_VgStack_unused_szB)( VgStack* stack )
+{
+ Int i;
+ UInt* p;
+
+ p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
+ for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
+ if (p[i] != 0xDEADBEEF)
+ break;
+
+ return i * sizeof(UInt);
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-linux.c.svn-base b/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-linux.c.svn-base
new file mode 100644
index 0000000..615d623
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/text-base/aspacemgr-linux.c.svn-base
@@ -0,0 +1,3230 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager: segment initialisation and ---*/
+/*--- tracking, stack operations ---*/
+/*--- ---*/
+/*--- Implementation for Linux m_aspacemgr-linux.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+ ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
+ AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
+ ************************************************************* */
+
+#include "priv_aspacemgr.h"
+
+
+/* Note: many of the exported functions implemented below are
+ described more fully in comments in pub_core_aspacemgr.h.
+*/
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Overview. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Purpose
+ ~~~~~~~
+ The purpose of the address space manager (aspacem) is:
+
+ (1) to record the disposition of all parts of the process' address
+ space at all times.
+
+ (2) to the extent that it can, influence layout in ways favourable
+ to our purposes.
+
+ It is important to appreciate that whilst it can and does attempt
+ to influence layout, and usually succeeds, it isn't possible to
+ impose absolute control: in the end, the kernel is the final
+ arbiter, and can always bounce our requests.
+
+ Strategy
+ ~~~~~~~~
+ The strategy is therefore as follows:
+
+ * Track ownership of mappings. Each one can belong either to
+ Valgrind or to the client.
+
+ * Try to place the client's fixed and hinted mappings at the
+ requested addresses. Fixed mappings are allowed anywhere except
+ in areas reserved by Valgrind; the client can trash its own
+ mappings if it wants. Hinted mappings are allowed providing they
+ fall entirely in free areas; if not, they will be placed by
+ aspacem in a free area.
+
+ * Anonymous mappings are allocated so as to keep Valgrind and
+ client areas widely separated when possible. If address space
+ runs low, then they may become intermingled: aspacem will attempt
+ to use all possible space. But under most circumstances lack of
+ address space is not a problem and so the areas will remain far
+ apart.
+
+ Searches for client space start at aspacem_cStart and will wrap
+ around the end of the available space if needed. Searches for
+ Valgrind space start at aspacem_vStart and will also wrap around.
+ Because aspacem_cStart is approximately at the start of the
+ available space and aspacem_vStart is approximately in the
+ middle, for the most part the client anonymous mappings will be
+ clustered towards the start of available space, and Valgrind ones
+ in the middle.
+
+ The available space is delimited by aspacem_minAddr and
+ aspacem_maxAddr. aspacem is flexible and can operate with these
+ at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
+ to some low-ish value at startup (64M) and aspacem_maxAddr is
+ derived from the stack pointer at system startup. This seems a
+ reliable way to establish the initial boundaries.
+
+ 64-bit Linux is similar except for the important detail that the
+ upper boundary is set to 32G. The reason is so that all
+ anonymous mappings (basically all client data areas) are kept
+ below 32G, since that is the maximum range that memcheck can
+ track shadow memory using a fast 2-level sparse array. It can go
+ beyond that but runs much more slowly. The 32G limit is
+ arbitrary and is trivially changed. So, with the current
+ settings, programs on 64-bit Linux will appear to run out of
+ address space and presumably fail at the 32G limit. Given the
+ 9/8 space overhead of Memcheck, that means you should be able to
+ memcheckify programs that use up to about 14G natively.
+
+ Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
+ anonymous mappings. The client can still do fixed and hinted maps
+ at any addresses provided they do not overlap Valgrind's segments.
+ This makes Valgrind able to load prelinked .so's at their requested
+ addresses on 64-bit platforms, even if they are very high (eg,
+ 112TB).
+
+ At startup, aspacem establishes the usable limits, and advises
+ m_main to place the client stack at the top of the range, which on
+ a 32-bit machine will be just below the real initial stack. One
+ effect of this is that self-hosting sort-of works, because an inner
+ valgrind will then place its client's stack just below its own
+ initial stack.
+
+ The segment array and segment kinds
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ The central data structure is the segment array (segments[0
+ .. nsegments_used-1]). This covers the entire address space in
+ order, giving account of every byte of it. Free spaces are
+ represented explicitly as this makes many operations simpler.
+ Mergeable adjacent segments are aggressively merged so as to create
+ a "normalised" representation (preen_nsegments).
+
+ There are 7 (mutually-exclusive) segment kinds, the meaning of
+ which is important:
+
+ SkFree: a free space, which may be allocated either to Valgrind (V)
+ or the client (C).
+
+ SkAnonC: an anonymous mapping belonging to C. For these, aspacem
+ tracks a boolean indicating whether or not is is part of the
+ client's heap area (can't remember why).
+
+ SkFileC: a file mapping belonging to C.
+
+ SkShmC: a shared memory segment belonging to C.
+
+ SkAnonV: an anonymous mapping belonging to V. These cover all V's
+ dynamic memory needs, including non-client malloc/free areas,
+ shadow memory, and the translation cache.
+
+ SkFileV: a file mapping belonging to V. As far as I know these are
+ only created transiently for the purposes of reading debug info.
+
+ SkResvn: a reservation segment.
+
+ These are mostly straightforward. Reservation segments have some
+ subtlety, however.
+
+ A reservation segment is unmapped from the kernel's point of view,
+ but is an area in which aspacem will not create anonymous maps
+ (either Vs or Cs). The idea is that we will try to keep it clear
+ when the choice to do so is ours. Reservation segments are
+ 'invisible' from the client's point of view: it may choose to park
+ a fixed mapping in the middle of one, and that's just tough -- we
+ can't do anything about that. From the client's perspective
+ reservations are semantically equivalent to (although
+ distinguishable from, if it makes enquiries) free areas.
+
+ Reservations are a primitive mechanism provided for whatever
+ purposes the rest of the system wants. Currently they are used to
+ reserve the expansion space into which a growdown stack is
+ expanded, and into which the data segment is extended. Note,
+ though, those uses are entirely external to this module, which only
+ supplies the primitives.
+
+ Reservations may be shrunk in order that an adjoining anonymous
+ mapping may be extended. This makes dataseg/stack expansion work.
+ A reservation may not be shrunk below one page.
+
+ The advise/notify concept
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+ All mmap-related calls must be routed via aspacem. Calling
+ sys_mmap directly from the rest of the system is very dangerous
+ because aspacem's data structures will become out of date.
+
+ The fundamental mode of operation of aspacem is to support client
+ mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
+
+ * m_syswrap intercepts the mmap call. It examines the parameters
+ and identifies the requested placement constraints. There are
+ three possibilities: no constraint (MAny), hinted (MHint, "I
+ prefer X but will accept anything"), and fixed (MFixed, "X or
+ nothing").
+
+ * This request is passed to VG_(am_get_advisory). This decides on
+ a placement as described in detail in Strategy above. It may
+ also indicate that the map should fail, because it would trash
+ one of Valgrind's areas, which would probably kill the system.
+
+ * Control returns to the wrapper. If VG_(am_get_advisory) has
+ declared that the map should fail, then it must be made to do so.
+ Usually, though, the request is considered acceptable, in which
+ case an "advised" address is supplied. The advised address
+ replaces the original address supplied by the client, and
+ MAP_FIXED is set.
+
+ Note at this point that although aspacem has been asked for
+ advice on where to place the mapping, no commitment has yet been
+ made by either it or the kernel.
+
+ * The adjusted request is handed off to the kernel.
+
+ * The kernel's result is examined. If the map succeeded, aspacem
+ is told of the outcome (VG_(am_notify_client_mmap)), so it can
+ update its records accordingly.
+
+ This then is the central advise-notify idiom for handling client
+ mmap/munmap/mprotect/shmat:
+
+ * ask aspacem for an advised placement (or a veto)
+
+ * if not vetoed, hand request to kernel, using the advised placement
+
+ * examine result, and if successful, notify aspacem of the result.
+
+ There are also many convenience functions, eg
+ VG_(am_mmap_anon_fixed_client), which do both phases entirely within
+ aspacem.
+
+ To debug all this, a sync-checker is provided. It reads
+ /proc/self/maps, compares what it sees with aspacem's records, and
+ complains if there is a difference. --sanity-level=3 runs it before
+ and after each syscall, which is a powerful, if slow way of finding
+ buggy syscall wrappers.
+
+ Loss of pointercheck
+ ~~~~~~~~~~~~~~~~~~~~
+ Up to and including Valgrind 2.4.1, x86 segmentation was used to
+ enforce seperation of V and C, so that wild writes by C could not
+ trash V. This got called "pointercheck". Unfortunately, the new
+ more flexible memory layout, plus the need to be portable across
+ different architectures, means doing this in hardware is no longer
+ viable, and doing it in software is expensive. So at the moment we
+ don't do it at all.
+*/
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The Address Space Manager's state. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* ------ start of STATE for the address-space manager ------ */
+
+/* Max number of segments we can track. */
+#define VG_N_SEGMENTS 5000
+
+/* Max number of segment file names we can track. */
+#define VG_N_SEGNAMES 1000
+
+/* Max length of a segment file name. */
+#define VG_MAX_SEGNAMELEN 1000
+
+
+typedef
+ struct {
+ Bool inUse;
+ Bool mark;
+ HChar fname[VG_MAX_SEGNAMELEN];
+ }
+ SegName;
+
+/* Filename table. _used is the high water mark; an entry is only
+ valid if its index >= 0, < _used, and its .inUse field == True.
+ The .mark field is used to garbage-collect dead entries.
+*/
+static SegName segnames[VG_N_SEGNAMES];
+static Int segnames_used = 0;
+
+
+/* Array [0 .. nsegments_used-1] of all mappings. */
+/* Sorted by .addr field. */
+/* I: len may not be zero. */
+/* I: overlapping segments are not allowed. */
+/* I: the segments cover the entire address space precisely. */
+/* Each segment can optionally hold an index into the filename table. */
+
+static NSegment nsegments[VG_N_SEGMENTS];
+static Int nsegments_used = 0;
+
+#define Addr_MIN ((Addr)0)
+#define Addr_MAX ((Addr)(-1ULL))
+
+/* Limits etc */
+
+// The smallest address that aspacem will try to allocate
+static Addr aspacem_minAddr = 0;
+
+// The largest address that aspacem will try to allocate
+static Addr aspacem_maxAddr = 0;
+
+// Where aspacem will start looking for client space
+static Addr aspacem_cStart = 0;
+
+// Where aspacem will start looking for Valgrind space
+static Addr aspacem_vStart = 0;
+
+
+#define AM_SANITY_CHECK \
+ do { \
+ if (VG_(clo_sanity_level >= 3)) \
+ aspacem_assert(VG_(am_do_sync_check) \
+ (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
+ } while (0)
+
+/* ------ end of STATE for the address-space manager ------ */
+
+/* ------ Forwards decls ------ */
+inline
+static Int find_nsegment_idx ( Addr a );
+
+static void parse_procselfmaps (
+ void (*record_mapping)( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename ),
+ void (*record_gap)( Addr addr, SizeT len )
+ );
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Functions for finding information about file descriptors. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Extract the device, inode and mode numbers for a fd. */
+static
+Bool get_inode_for_fd ( Int fd, /*OUT*/ULong* dev,
+ /*OUT*/ULong* ino, /*OUT*/UInt* mode )
+{
+ return ML_(am_get_fd_d_i_m)(fd, dev, ino, mode);
+}
+
+/* Given a file descriptor, attempt to deduce its filename. To do
+ this, we use /proc/self/fd/<FD>. If this doesn't point to a file,
+ or if it doesn't exist, we return False. */
+static
+Bool get_name_for_fd ( Int fd, /*OUT*/HChar* buf, Int nbuf )
+{
+ Int i;
+ HChar tmp[64];
+
+ ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
+ for (i = 0; i < nbuf; i++) buf[i] = 0;
+
+ if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
+ return True;
+ else
+ return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- SegName array management. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Searches the filename table to find an index for the given name.
+ If none is found, an index is allocated and the name stored. If no
+ space is available we just give up. If the string is too long to
+ store, return -1.
+*/
+static Int allocate_segname ( const HChar* name )
+{
+ Int i, j, len;
+
+ aspacem_assert(name);
+
+ if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
+
+ len = VG_(strlen)(name);
+ if (len >= VG_MAX_SEGNAMELEN-1) {
+ return -1;
+ }
+
+ /* first see if we already have the name. */
+ for (i = 0; i < segnames_used; i++) {
+ if (!segnames[i].inUse)
+ continue;
+ if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
+ return i;
+ }
+ }
+
+ /* no we don't. So look for a free slot. */
+ for (i = 0; i < segnames_used; i++)
+ if (!segnames[i].inUse)
+ break;
+
+ if (i == segnames_used) {
+ /* no free slots .. advance the high-water mark. */
+ if (segnames_used+1 < VG_N_SEGNAMES) {
+ i = segnames_used;
+ segnames_used++;
+ } else {
+ ML_(am_barf_toolow)("VG_N_SEGNAMES");
+ }
+ }
+
+ /* copy it in */
+ segnames[i].inUse = True;
+ for (j = 0; j < len; j++)
+ segnames[i].fname[j] = name[j];
+ aspacem_assert(len < VG_MAX_SEGNAMELEN);
+ segnames[i].fname[len] = 0;
+ return i;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Displaying the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+static HChar* show_SegKind ( SegKind sk )
+{
+ switch (sk) {
+ case SkFree: return " ";
+ case SkAnonC: return "anon";
+ case SkAnonV: return "ANON";
+ case SkFileC: return "file";
+ case SkFileV: return "FILE";
+ case SkShmC: return "shm ";
+ case SkResvn: return "RSVN";
+ default: return "????";
+ }
+}
+
+static HChar* show_ShrinkMode ( ShrinkMode sm )
+{
+ switch (sm) {
+ case SmLower: return "SmLower";
+ case SmUpper: return "SmUpper";
+ case SmFixed: return "SmFixed";
+ default: return "Sm?????";
+ }
+}
+
+static void show_Addr_concisely ( /*OUT*/HChar* buf, Addr aA )
+{
+ HChar* fmt;
+ ULong a = (ULong)aA;
+
+ if (a < 10*1000*1000ULL) {
+ fmt = "%7llu";
+ }
+ else if (a < 999999ULL * (1ULL<<20)) {
+ fmt = "%6llum";
+ a >>= 20;
+ }
+ else if (a < 999999ULL * (1ULL<<30)) {
+ fmt = "%6llug";
+ a >>= 30;
+ }
+ else if (a < 999999ULL * (1ULL<<40)) {
+ fmt = "%6llut";
+ a >>= 40;
+ }
+ else {
+ fmt = "%6llue";
+ a >>= 50;
+ }
+ ML_(am_sprintf)(buf, fmt, a);
+}
+
+
+/* Show full details of an NSegment */
+
+static void __attribute__ ((unused))
+ show_nsegment_full ( Int logLevel, NSegment* seg )
+{
+ HChar* name = "(none)";
+ if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
+ && segnames[seg->fnIdx].inUse
+ && segnames[seg->fnIdx].fname[0] != 0)
+ name = segnames[seg->fnIdx].fname;
+
+ VG_(debugLog)(logLevel, "aspacem",
+ "NSegment{%s, start=0x%llx, end=0x%llx, smode=%s, dev=%llu, "
+ "ino=%llu, offset=%lld, fnIdx=%d, hasR=%d, hasW=%d, hasX=%d, "
+ "hasT=%d, mark=%d, name=\"%s\"}\n",
+ show_SegKind(seg->kind),
+ (ULong)seg->start,
+ (ULong)seg->end,
+ show_ShrinkMode(seg->smode),
+ seg->dev, seg->ino, seg->offset, seg->fnIdx,
+ (Int)seg->hasR, (Int)seg->hasW, (Int)seg->hasX, (Int)seg->hasT,
+ (Int)seg->mark,
+ name
+ );
+}
+
+
+/* Show an NSegment in a user-friendly-ish way. */
+
+static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
+{
+ HChar len_buf[20];
+ ULong len = ((ULong)seg->end) - ((ULong)seg->start) + 1;
+ show_Addr_concisely(len_buf, len);
+
+ switch (seg->kind) {
+
+ case SkFree:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf
+ );
+ break;
+
+ case SkAnonC: case SkAnonV: case SkShmC:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf,
+ seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
+ seg->isCH ? 'H' : '-'
+ );
+ break;
+
+ case SkFileC: case SkFileV:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
+ "i=%-7lld o=%-7lld (%d)\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf,
+ seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
+ seg->isCH ? 'H' : '-',
+ seg->dev, seg->ino, seg->offset, seg->fnIdx
+ );
+ break;
+
+ case SkResvn:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf,
+ seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
+ seg->isCH ? 'H' : '-',
+ show_ShrinkMode(seg->smode)
+ );
+ break;
+
+ default:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: ???? UNKNOWN SEGMENT KIND\n",
+ segNo
+ );
+ break;
+ }
+}
+
+/* Print out the segment array (debugging only!). */
+void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
+{
+ Int i;
+ VG_(debugLog)(logLevel, "aspacem",
+ "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
+ who, nsegments_used, segnames_used);
+ for (i = 0; i < segnames_used; i++) {
+ if (!segnames[i].inUse)
+ continue;
+ VG_(debugLog)(logLevel, "aspacem",
+ "(%2d) %s\n", i, segnames[i].fname);
+ }
+ for (i = 0; i < nsegments_used; i++)
+ show_nsegment( logLevel, i, &nsegments[i] );
+ VG_(debugLog)(logLevel, "aspacem",
+ ">>>\n");
+}
+
+
+/* Get the filename corresponding to this segment, if known and if it
+ has one. The returned name's storage cannot be assumed to be
+ persistent, so the caller should immediately copy the name
+ elsewhere. */
+HChar* VG_(am_get_filename)( NSegment const * seg )
+{
+ Int i;
+ aspacem_assert(seg);
+ i = seg->fnIdx;
+ if (i < 0 || i >= segnames_used || !segnames[i].inUse)
+ return NULL;
+ else
+ return &segnames[i].fname[0];
+}
+
+/* Collect up the start addresses of all non-free, non-resvn segments.
+ The interface is a bit strange in order to avoid potential
+ segment-creation races caused by dynamic allocation of the result
+ buffer *starts.
+
+ The function first computes how many entries in the result
+ buffer *starts will be needed. If this number <= nStarts,
+ they are placed in starts[0..], and the number is returned.
+ If nStarts is not large enough, nothing is written to
+ starts[0..], and the negation of the size is returned.
+
+ Correct use of this function may mean calling it multiple times in
+ order to establish a suitably-sized buffer. */
+
+Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
+{
+ Int i, j, nSegs;
+
+ /* don't pass dumbass arguments */
+ aspacem_assert(nStarts >= 0);
+
+ nSegs = 0;
+ for (i = 0; i < nsegments_used; i++) {
+ if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
+ continue;
+ nSegs++;
+ }
+
+ if (nSegs > nStarts) {
+ /* The buffer isn't big enough. Tell the caller how big it needs
+ to be. */
+ return -nSegs;
+ }
+
+ /* There's enough space. So write into the result buffer. */
+ aspacem_assert(nSegs <= nStarts);
+
+ j = 0;
+ for (i = 0; i < nsegments_used; i++) {
+ if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
+ continue;
+ starts[j] = nsegments[i].start;
+ j++;
+ }
+
+ aspacem_assert(j == nSegs); /* this should not fail */
+ return nSegs;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Sanity checking and preening of the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Check representational invariants for NSegments. */
+
+static Bool sane_NSegment ( NSegment* s )
+{
+ if (s == NULL) return False;
+
+ /* No zero sized segments and no wraparounds. */
+ if (s->start >= s->end) return False;
+
+ /* .mark is used for admin purposes only. */
+ if (s->mark) return False;
+
+ /* require page alignment */
+ if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
+ if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
+
+ switch (s->kind) {
+
+ case SkFree:
+ return
+ s->smode == SmFixed
+ && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
+ && !s->hasR && !s->hasW && !s->hasX && !s->hasT
+ && !s->isCH;
+
+ case SkAnonC: case SkAnonV: case SkShmC:
+ return
+ s->smode == SmFixed
+ && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
+ && (s->kind==SkAnonC ? True : !s->isCH);
+
+ case SkFileC: case SkFileV:
+ return
+ s->smode == SmFixed
+ && (s->fnIdx == -1 ||
+ (s->fnIdx >= 0 && s->fnIdx < segnames_used
+ && segnames[s->fnIdx].inUse))
+ && !s->isCH;
+
+ case SkResvn:
+ return
+ s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
+ && !s->hasR && !s->hasW && !s->hasX && !s->hasT
+ && !s->isCH;
+
+ default:
+ return False;
+ }
+}
+
+
+/* Try merging s2 into s1, if possible. If successful, s1 is
+ modified, and True is returned. Otherwise s1 is unchanged and
+ False is returned. */
+
+static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
+{
+ if (s1->kind != s2->kind)
+ return False;
+
+ if (s1->end+1 != s2->start)
+ return False;
+
+ /* reject cases which would cause wraparound */
+ if (s1->start > s2->end)
+ return False;
+
+ switch (s1->kind) {
+
+ case SkFree:
+ s1->end = s2->end;
+ return True;
+
+ case SkAnonC: case SkAnonV:
+ if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
+ && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
+ s1->end = s2->end;
+ s1->hasT |= s2->hasT;
+ return True;
+ }
+ break;
+
+ case SkFileC: case SkFileV:
+ if (s1->hasR == s2->hasR
+ && s1->hasW == s2->hasW && s1->hasX == s2->hasX
+ && s1->dev == s2->dev && s1->ino == s2->ino
+ && s2->offset == s1->offset
+ + ((ULong)s2->start) - ((ULong)s1->start) ) {
+ s1->end = s2->end;
+ s1->hasT |= s2->hasT;
+ return True;
+ }
+ break;
+
+ case SkShmC:
+ return False;
+
+ case SkResvn:
+ if (s1->smode == SmFixed && s2->smode == SmFixed) {
+ s1->end = s2->end;
+ return True;
+ }
+
+ default:
+ break;
+
+ }
+
+ return False;
+}
+
+
+/* Sanity-check and canonicalise the segment array (merge mergable
+ segments). Returns True if any segments were merged. */
+
+static Bool preen_nsegments ( void )
+{
+ Int i, j, r, w, nsegments_used_old = nsegments_used;
+
+ /* Pass 1: check the segment array covers the entire address space
+ exactly once, and also that each segment is sane. */
+ aspacem_assert(nsegments_used > 0);
+ aspacem_assert(nsegments[0].start == Addr_MIN);
+ aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
+
+ aspacem_assert(sane_NSegment(&nsegments[0]));
+ for (i = 1; i < nsegments_used; i++) {
+ aspacem_assert(sane_NSegment(&nsegments[i]));
+ aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
+ }
+
+ /* Pass 2: merge as much as possible, using
+ maybe_merge_segments. */
+ w = 0;
+ for (r = 1; r < nsegments_used; r++) {
+ if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
+ /* nothing */
+ } else {
+ w++;
+ if (w != r)
+ nsegments[w] = nsegments[r];
+ }
+ }
+ w++;
+ aspacem_assert(w > 0 && w <= nsegments_used);
+ nsegments_used = w;
+
+ /* Pass 3: free up unused string table slots */
+ /* clear mark bits */
+ for (i = 0; i < segnames_used; i++)
+ segnames[i].mark = False;
+ /* mark */
+ for (i = 0; i < nsegments_used; i++) {
+ j = nsegments[i].fnIdx;
+ aspacem_assert(j >= -1 && j < segnames_used);
+ if (j >= 0) {
+ aspacem_assert(segnames[j].inUse);
+ segnames[j].mark = True;
+ }
+ }
+ /* release */
+ for (i = 0; i < segnames_used; i++) {
+ if (segnames[i].mark == False) {
+ segnames[i].inUse = False;
+ segnames[i].fname[0] = 0;
+ }
+ }
+
+ return nsegments_used != nsegments_used_old;
+}
+
+
+/* Check the segment array corresponds with the kernel's view of
+ memory layout. sync_check_ok returns True if no anomalies were
+ found, else False. In the latter case the mismatching segments are
+ displayed.
+
+ The general idea is: we get the kernel to show us all its segments
+ and also the gaps in between. For each such interval, try and find
+ a sequence of appropriate intervals in our segment array which
+ cover or more than cover the kernel's interval, and which all have
+ suitable kinds/permissions etc.
+
+ Although any specific kernel interval is not matched exactly to a
+ valgrind interval or sequence thereof, eventually any disagreement
+ on mapping boundaries will be detected. This is because, if for
+ example valgrind's intervals cover a greater range than the current
+ kernel interval, it must be the case that a neighbouring free-space
+ interval belonging to valgrind cannot cover the neighbouring
+ free-space interval belonging to the kernel. So the disagreement
+ is detected.
+
+ In other words, we examine each kernel interval in turn, and check
+ we do not disagree over the range of that interval. Because all of
+ the address space is examined, any disagreements must eventually be
+ detected.
+*/
+
+static Bool sync_check_ok = False;
+
+static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename )
+{
+ Int iLo, iHi, i;
+ Bool sloppyXcheck;
+
+ /* If a problem has already been detected, don't continue comparing
+ segments, so as to avoid flooding the output with error
+ messages. */
+ if (!sync_check_ok)
+ return;
+
+ if (len == 0)
+ return;
+
+ /* The kernel should not give us wraparounds. */
+ aspacem_assert(addr <= addr + len - 1);
+
+ iLo = find_nsegment_idx( addr );
+ iHi = find_nsegment_idx( addr + len - 1 );
+
+ /* These 5 should be guaranteed by find_nsegment_idx. */
+ aspacem_assert(0 <= iLo && iLo < nsegments_used);
+ aspacem_assert(0 <= iHi && iHi < nsegments_used);
+ aspacem_assert(iLo <= iHi);
+ aspacem_assert(nsegments[iLo].start <= addr );
+ aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
+
+ /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
+ most recent NX-bit enabled CPUs) and so recent kernels attempt
+ to provide execute protection by placing all executable mappings
+ low down in the address space and then reducing the size of the
+ code segment to prevent code at higher addresses being executed.
+
+ These kernels report which mappings are really executable in
+ the /proc/self/maps output rather than mirroring what was asked
+ for when each mapping was created. In order to cope with this we
+ have a sloppyXcheck mode which we enable on x86 - in this mode we
+ allow the kernel to report execute permission when we weren't
+ expecting it but not vice versa. */
+# if defined(VGA_x86)
+ sloppyXcheck = True;
+# else
+ sloppyXcheck = False;
+# endif
+
+ /* NSegments iLo .. iHi inclusive should agree with the presented
+ data. */
+ for (i = iLo; i <= iHi; i++) {
+
+ Bool same, cmp_offsets, cmp_devino;
+ UInt seg_prot;
+
+ /* compare the kernel's offering against ours. */
+ same = nsegments[i].kind == SkAnonC
+ || nsegments[i].kind == SkAnonV
+ || nsegments[i].kind == SkFileC
+ || nsegments[i].kind == SkFileV
+ || nsegments[i].kind == SkShmC;
+
+ seg_prot = 0;
+ if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
+ if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
+ if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
+
+ cmp_offsets
+ = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
+
+ cmp_devino
+ = nsegments[i].dev != 0 || nsegments[i].ino != 0;
+
+ /* Consider other reasons to not compare dev/inode */
+
+ /* bproc does some godawful hack on /dev/zero at process
+ migration, which changes the name of it, and its dev & ino */
+ if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
+ cmp_devino = False;
+
+ /* hack apparently needed on MontaVista Linux */
+ if (filename && VG_(strstr)(filename, "/.lib-ro/"))
+ cmp_devino = False;
+
+ /* If we are doing sloppy execute permission checks then we
+ allow segment to have X permission when we weren't expecting
+ it (but not vice versa) so if the kernel reported execute
+ permission then pretend that this segment has it regardless
+ of what we were expecting. */
+ if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
+ seg_prot |= VKI_PROT_EXEC;
+ }
+
+ same = same
+ && seg_prot == prot
+ && (cmp_devino
+ ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
+ : True)
+ && (cmp_offsets
+ ? nsegments[i].start-nsegments[i].offset == addr-offset
+ : True);
+ if (!same) {
+ sync_check_ok = False;
+ VG_(debugLog)(
+ 0,"aspacem",
+ "sync_check_mapping_callback: segment mismatch: V's seg:\n");
+ show_nsegment_full( 0, &nsegments[i] );
+ goto show_kern_seg;
+ }
+ }
+
+ /* Looks harmless. Keep going. */
+ return;
+
+ show_kern_seg:
+ VG_(debugLog)(0,"aspacem",
+ "sync_check_mapping_callback: "
+ "segment mismatch: kernel's seg:\n");
+ VG_(debugLog)(0,"aspacem",
+ "start=0x%llx end=0x%llx prot=%u "
+ "dev=%llu ino=%llu offset=%lld name=\"%s\"\n",
+ (ULong)addr, ((ULong)addr) + ((ULong)len) - 1,
+ prot, dev, ino, offset,
+ filename ? (HChar*)filename : "(none)" );
+ return;
+}
+
+/* Callback for parse_procselfmaps used by VG_(am_do_sync_check):
+   the kernel reports that [addr, addr+len-1] is unmapped (a gap).
+   Verify that Valgrind's segment array agrees -- every segment
+   overlapping the range must be SkFree or SkResvn.  On mismatch,
+   clear sync_check_ok and print both Valgrind's and the kernel's
+   view of the range.  Never aborts. */
+static void sync_check_gap_callback ( Addr addr, SizeT len )
+{
+   Int iLo, iHi, i;
+
+   /* If a problem has already been detected, don't continue comparing
+      segments, so as to avoid flooding the output with error
+      messages. */
+   if (!sync_check_ok)
+      return;
+
+   if (len == 0)
+      return;
+
+   /* The kernel should not give us wraparounds. */
+   aspacem_assert(addr <= addr + len - 1);
+
+   iLo = find_nsegment_idx( addr );
+   iHi = find_nsegment_idx( addr + len - 1 );
+
+   /* These 5 should be guaranteed by find_nsegment_idx. */
+   aspacem_assert(0 <= iLo && iLo < nsegments_used);
+   aspacem_assert(0 <= iHi && iHi < nsegments_used);
+   aspacem_assert(iLo <= iHi);
+   aspacem_assert(nsegments[iLo].start <= addr );
+   aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
+
+   /* NSegments iLo .. iHi inclusive should agree with the presented
+      data. */
+   for (i = iLo; i <= iHi; i++) {
+
+      Bool same;
+
+      /* compare the kernel's offering against ours. */
+      same = nsegments[i].kind == SkFree
+             || nsegments[i].kind == SkResvn;
+
+      if (!same) {
+         sync_check_ok = False;
+         /* Fixed: this message previously said
+            "sync_check_mapping_callback", a copy-paste from the
+            sibling callback; we are in the gap callback. */
+         VG_(debugLog)(
+            0,"aspacem",
+            "sync_check_gap_callback: segment mismatch: V's gap:\n");
+         show_nsegment_full( 0, &nsegments[i] );
+         goto show_kern_gap;
+      }
+   }
+
+   /* Looks harmless. Keep going. */
+   return;
+
+  show_kern_gap:
+   VG_(debugLog)(0,"aspacem",
+      "sync_check_gap_callback: segment mismatch: kernel's gap:\n");
+   VG_(debugLog)(0,"aspacem",
+      "start=0x%llx end=0x%llx\n",
+      (ULong)addr, ((ULong)addr) + ((ULong)len) - 1 );
+   return;
+}
+
+
+/* Sanity check: check that Valgrind and the kernel agree on the
+   address space layout. Prints offending segments and call point if
+   a discrepancy is detected, but does not abort the system. Returned
+   Bool is False if a discrepancy was found.
+
+   fn/file/line identify the call site, for the failure message. */
+
+Bool VG_(am_do_sync_check) ( const HChar* fn,
+                             const HChar* file, Int line )
+{
+   sync_check_ok = True;
+   if (0)
+      VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
+   /* The two callbacks clear sync_check_ok on the first mismatch. */
+   parse_procselfmaps( sync_check_mapping_callback,
+                       sync_check_gap_callback );
+   if (!sync_check_ok) {
+      VG_(debugLog)(0,"aspacem",
+                    "sync check at %s:%d (%s): FAILED\n",
+                    file, line, fn);
+      VG_(debugLog)(0,"aspacem", "\n");
+
+      /* Disabled debug aid: dump V's segments and the kernel's
+         /proc/<pid>/maps for manual comparison. */
+#     if 0
+      {
+         HChar buf[100];
+         VG_(am_show_nsegments)(0,"post syncheck failure");
+         VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
+         VG_(system)(buf);
+      }
+#     endif
+
+   }
+   return sync_check_ok;
+}
+
+/* Hook to allow sanity checks to be done from aspacemgr-common.c.
+   Simply expands the module's AM_SANITY_CHECK macro. */
+void ML_(am_do_sanity_check)( void )
+{
+   AM_SANITY_CHECK;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Low level access / modification of the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Binary search the interval array for a given address. Since the
+   array covers the entire address space the search cannot fail. The
+   _WRK function does the real work. Its caller (just below) caches
+   the results thereof, to save time. With N_CACHE of 63 we get a hit
+   rate exceeding 90% when running OpenOffice.
+
+   Re ">> 12", it doesn't matter that the page size of some targets
+   might be different from 12. Really "(a >> 12) % N_CACHE" is merely
+   a hash function, and the actual cache entry is always validated
+   correctly against the selected cache entry before use.
+*/
+/* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
+__attribute__((noinline))
+static Int find_nsegment_idx_WRK ( Addr a )
+{
+   Addr a_mid_lo, a_mid_hi;
+   Int  mid,
+        lo = 0,
+        hi = nsegments_used-1;
+   /* Classic binary search over [lo, hi]; relies on nsegments[]
+      being sorted, non-overlapping and covering all of [0, Addr_MAX],
+      so failure to find 'a' is impossible. */
+   while (True) {
+      /* current unsearched space is from lo to hi, inclusive. */
+      if (lo > hi) {
+         /* Not found. This can't happen. */
+         ML_(am_barf)("find_nsegment_idx: not found");
+      }
+      mid      = (lo + hi) / 2;
+      a_mid_lo = nsegments[mid].start;
+      a_mid_hi = nsegments[mid].end;
+
+      if (a < a_mid_lo) { hi = mid-1; continue; }
+      if (a > a_mid_hi) { lo = mid+1; continue; }
+      aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
+      aspacem_assert(0 <= mid && mid < nsegments_used);
+      return mid;
+   }
+}
+
+/* Cached front-end for find_nsegment_idx_WRK: returns the index of
+   the segment containing 'a'.  The cache maps (a >> 12) to a
+   candidate index; a hit is only accepted after re-validating that
+   the candidate segment really contains 'a', so a stale entry can
+   never produce a wrong answer. */
+inline static Int find_nsegment_idx ( Addr a )
+{
+#  define N_CACHE 63
+   static Addr cache_pageno[N_CACHE];
+   static Int  cache_segidx[N_CACHE];
+   static Bool cache_inited = False;
+
+   /* query / miss counters, for the disabled stats print below */
+   static UWord n_q = 0;
+   static UWord n_m = 0;
+
+   UWord ix;
+
+   /* one-time lazy initialisation: mark all entries invalid (-1) */
+   if (LIKELY(cache_inited)) {
+      /* do nothing */
+   } else {
+      for (ix = 0; ix < N_CACHE; ix++) {
+         cache_pageno[ix] = 0;
+         cache_segidx[ix] = -1;
+      }
+      cache_inited = True;
+   }
+
+   ix = (a >> 12) % N_CACHE;
+
+   n_q++;
+   if (0 && 0 == (n_q & 0xFFFF))
+      VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
+
+   if ((a >> 12) == cache_pageno[ix]
+       && cache_segidx[ix] >= 0
+       && cache_segidx[ix] < nsegments_used
+       && nsegments[cache_segidx[ix]].start <= a
+       && a <= nsegments[cache_segidx[ix]].end) {
+      /* hit */
+      /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
+      return cache_segidx[ix];
+   }
+   /* miss */
+   n_m++;
+   cache_segidx[ix] = find_nsegment_idx_WRK(a);
+   cache_pageno[ix] = a >> 12;
+   return cache_segidx[ix];
+#  undef N_CACHE
+}
+
+
+
+/* Finds the segment containing 'a'. Only returns file/anon/resvn
+   segments. This returns a 'NSegment const *' - a pointer to
+   readonly data.  Returns NULL when 'a' falls in a free (unmapped)
+   area; cannot otherwise fail, since the array covers the whole
+   address space. */
+NSegment const * VG_(am_find_nsegment) ( Addr a )
+{
+   Int i = find_nsegment_idx(a);
+   aspacem_assert(i >= 0 && i < nsegments_used);
+   aspacem_assert(nsegments[i].start <= a);
+   aspacem_assert(a <= nsegments[i].end);
+   if (nsegments[i].kind == SkFree)
+      return NULL;
+   else
+      return &nsegments[i];
+}
+
+
+/* Given a pointer to a seg, tries to figure out which one it is in
+   nsegments[..]. Very paranoid.  Returns the index, or -1 if 'seg'
+   does not point at an element of the array (out of bounds, or
+   misaligned within it). */
+static Int segAddr_to_index ( NSegment* seg )
+{
+   Int i;
+   if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
+      return -1;
+   i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
+   if (i < 0 || i >= nsegments_used)
+      return -1;
+   /* final check: the computed index must round-trip to 'seg',
+      which catches a pointer not on an element boundary */
+   if (seg == &nsegments[i])
+      return i;
+   return -1;
+}
+
+
+/* Find the next segment along from 'here', if it is a file/anon/resvn
+   segment.  Direction is chosen by 'fwds' (True = towards higher
+   addresses).  Returns NULL if 'here' is not a valid element of the
+   array, if there is no neighbour in that direction, or if the
+   neighbour is a free segment. */
+NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
+{
+   Int i = segAddr_to_index(here);
+   if (i < 0 || i >= nsegments_used)
+      return NULL;
+   if (fwds) {
+      i++;
+      if (i >= nsegments_used)
+         return NULL;
+   } else {
+      i--;
+      if (i < 0)
+         return NULL;
+   }
+   /* only mapped or reserved segments are returned; SkFree is not */
+   switch (nsegments[i].kind) {
+      case SkFileC: case SkFileV: case SkShmC:
+      case SkAnonC: case SkAnonV: case SkResvn:
+         return &nsegments[i];
+      default:
+         break;
+   }
+   return NULL;
+}
+
+
+/* Trivial fn: return the total amount of space in anonymous mappings,
+   both for V and the client. Is used for printing stats in
+   out-of-memory messages.  Sums (end - start + 1) over all SkAnonC
+   and SkAnonV segments; done in ULong so it cannot overflow even on
+   32-bit targets. */
+ULong VG_(am_get_anonsize_total)( void )
+{
+   Int   i;
+   ULong total = 0;
+   for (i = 0; i < nsegments_used; i++) {
+      if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
+         total += (ULong)nsegments[i].end
+                  - (ULong)nsegments[i].start + 1ULL;
+      }
+   }
+   return total;
+}
+
+
+/* Test if a piece of memory is addressable by the client with at
+   least the "prot" protection permissions by examining the underlying
+   segments. If freeOk is True then SkFree areas are also allowed
+   (and so are SkResvn areas -- see the loop below).
+
+   Returns True iff every segment overlapping [start, start+len-1] is
+   a client-visible kind with all the requested R/W/X bits set. */
+static
+Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
+{
+   Int  i, iLo, iHi;
+   Bool needR, needW, needX;
+
+   if (len == 0)
+      return True; /* somewhat dubious case */
+   if (start + len < start)
+      return False; /* reject wraparounds */
+
+   needR = toBool(prot & VKI_PROT_READ);
+   needW = toBool(prot & VKI_PROT_WRITE);
+   needX = toBool(prot & VKI_PROT_EXEC);
+
+   iLo = find_nsegment_idx(start);
+   aspacem_assert(start >= nsegments[iLo].start);
+
+   if (start+len-1 <= nsegments[iLo].end) {
+      /* This is a speedup hack which avoids calling find_nsegment_idx
+         a second time when possible. It is always correct to just
+         use the "else" clause below, but is_valid_for_client is
+         called a lot by the leak checker, so avoiding pointless calls
+         to find_nsegment_idx, which can be expensive, is helpful. */
+      iHi = iLo;
+   } else {
+      iHi = find_nsegment_idx(start + len - 1);
+   }
+
+   for (i = iLo; i <= iHi; i++) {
+      if ( (nsegments[i].kind == SkFileC
+            || nsegments[i].kind == SkAnonC
+            || nsegments[i].kind == SkShmC
+            || (nsegments[i].kind == SkFree  && freeOk)
+            || (nsegments[i].kind == SkResvn && freeOk))
+           && (needR ? nsegments[i].hasR : True)
+           && (needW ? nsegments[i].hasW : True)
+           && (needX ? nsegments[i].hasX : True) ) {
+         /* ok */
+      } else {
+         return False;
+      }
+   }
+   return True;
+}
+
+/* Test if a piece of memory is addressable by the client with at
+   least the "prot" protection permissions by examining the underlying
+   segments.  Thin wrapper: free/reserved areas are NOT acceptable. */
+Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
+                                  UInt prot )
+{
+   return is_valid_for_client( start, len, prot, False/*free not OK*/ );
+}
+
+/* Variant of VG_(am_is_valid_for_client) which allows free areas to
+   be consider part of the client's addressable space. It also
+   considers reservations to be allowable, since from the client's
+   point of view they don't exist. */
+Bool VG_(am_is_valid_for_client_or_free_or_resvn)
+   ( Addr start, SizeT len, UInt prot )
+{
+   return is_valid_for_client( start, len, prot, True/*free is OK*/ );
+}
+
+
+/* Test if a piece of memory is addressable by valgrind with at least
+   PROT_NONE protection permissions by examining the underlying
+   segments.  Only SkFileV and SkAnonV segments qualify; note that
+   shared-memory and client segments are deliberately not accepted. */
+static Bool is_valid_for_valgrind( Addr start, SizeT len )
+{
+   Int i, iLo, iHi;
+
+   if (len == 0)
+      return True; /* somewhat dubious case */
+   if (start + len < start)
+      return False; /* reject wraparounds */
+
+   iLo = find_nsegment_idx(start);
+   iHi = find_nsegment_idx(start + len - 1);
+   for (i = iLo; i <= iHi; i++) {
+      if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
+         /* ok */
+      } else {
+         return False;
+      }
+   }
+   return True;
+}
+
+
+/* Returns True if any part of the address range is marked as having
+   translations made from it. This is used to determine when to
+   discard code, so if in doubt return True.
+
+   Precondition: len > 0 and the range does not wrap around. */
+static Bool any_Ts_in_range ( Addr start, SizeT len )
+{
+   Int iLo, iHi, i;
+   aspacem_assert(len > 0);
+   aspacem_assert(start + len > start);
+   iLo = find_nsegment_idx(start);
+   iHi = find_nsegment_idx(start + len - 1);
+   for (i = iLo; i <= iHi; i++) {
+      if (nsegments[i].hasT)
+         return True;
+   }
+   return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Modifying the segment array, and constructing segments. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Split the segment containing 'a' into two, so that 'a' is
+   guaranteed to be the start of a new segment. If 'a' is already the
+   start of a segment, do nothing.
+
+   'a' must be nonzero and page aligned.  May barf if the segment
+   array is full (VG_N_SEGMENTS). */
+static void split_nsegment_at ( Addr a )
+{
+   Int i, j;
+
+   aspacem_assert(a > 0);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
+
+   i = find_nsegment_idx(a);
+   aspacem_assert(i >= 0 && i < nsegments_used);
+
+   if (nsegments[i].start == a)
+      /* 'a' is already the start point of a segment, so nothing to be
+         done. */
+      return;
+
+   /* else we have to slide the segments upwards to make a hole */
+   if (nsegments_used >= VG_N_SEGMENTS)
+      ML_(am_barf_toolow)("VG_N_SEGMENTS");
+   for (j = nsegments_used-1; j > i; j--)
+      nsegments[j+1] = nsegments[j];
+   nsegments_used++;
+
+   /* duplicate segment i, then trim the two halves so that i ends at
+      a-1 and i+1 starts at a */
+   nsegments[i+1] = nsegments[i];
+   nsegments[i+1].start = a;
+   nsegments[i].end     = a-1;
+
+   /* for file mappings, the upper half's file offset must advance by
+      the number of bytes cut off the front */
+   if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
+      nsegments[i+1].offset
+         += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
+
+   aspacem_assert(sane_NSegment(&nsegments[i]));
+   aspacem_assert(sane_NSegment(&nsegments[i+1]));
+}
+
+
+/* Do the minimum amount of segment splitting necessary to ensure that
+   sLo is the first address denoted by some segment and sHi is the
+   highest address denoted by some other segment. Returns the indices
+   of the lowest and highest segments in the range. */
+
+static
+void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
+                                 /*OUT*/Int* iLo,
+                                 /*OUT*/Int* iHi )
+{
+   aspacem_assert(sLo < sHi);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
+
+   /* sLo == 0 needs no split: nothing lies below it. */
+   if (sLo > 0)
+      split_nsegment_at(sLo);
+   /* "sHi < sHi+1" guards against wraparound when sHi == Addr_MAX,
+      in which case there is nothing above to split off. */
+   if (sHi < sHi+1)
+      split_nsegment_at(sHi+1);
+
+   *iLo = find_nsegment_idx(sLo);
+   *iHi = find_nsegment_idx(sHi);
+   aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
+   aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
+   aspacem_assert(*iLo <= *iHi);
+   aspacem_assert(nsegments[*iLo].start == sLo);
+   aspacem_assert(nsegments[*iHi].end == sHi);
+   /* Not that I'm overly paranoid or anything, definitely not :-) */
+}
+
+
+/* Add SEG to the collection, deleting/truncating any it overlaps.
+   This deals with all the tricky cases of splitting up segments as
+   needed.  SEG must be sane and page aligned at both ends; it is
+   copied into the array, so the caller keeps ownership of *seg. */
+
+static void add_segment ( NSegment* seg )
+{
+   Int  i, iLo, iHi, delta;
+   Bool segment_is_sane;
+
+   Addr sStart = seg->start;
+   Addr sEnd   = seg->end;
+
+   aspacem_assert(sStart <= sEnd);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
+
+   segment_is_sane = sane_NSegment(seg);
+   if (!segment_is_sane) show_nsegment_full(0,seg);
+   aspacem_assert(segment_is_sane);
+
+   /* after this, segment boundaries exist exactly at sStart and
+      sEnd+1, so [iLo, iHi] is precisely the span to be replaced */
+   split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
+
+   /* Now iLo .. iHi inclusive is the range of segment indices which
+      seg will replace. If we're replacing more than one segment,
+      slide those above the range down to fill the hole. */
+   delta = iHi - iLo;
+   aspacem_assert(delta >= 0);
+   if (delta > 0) {
+      for (i = iLo; i < nsegments_used-delta; i++)
+         nsegments[i] = nsegments[i+delta];
+      nsegments_used -= delta;
+   }
+
+   nsegments[iLo] = *seg;
+
+   /* merge any now-adjacent mergeable segments */
+   (void)preen_nsegments();
+   if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
+}
+
+
+/* Clear out an NSegment record: kind SkFree, empty [0,0] range, no
+   permissions, no file identity (fnIdx == -1 means "no name"). */
+
+static void init_nsegment ( /*OUT*/NSegment* seg )
+{
+   seg->kind   = SkFree;
+   seg->start  = 0;
+   seg->end    = 0;
+   seg->smode  = SmFixed;
+   seg->dev    = 0;
+   seg->ino    = 0;
+   seg->mode   = 0;
+   seg->offset = 0;
+   seg->fnIdx  = -1;
+   seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
+   seg->mark = False;
+}
+
+/* Make an NSegment which holds a reservation (SkResvn) spanning
+   [start, end].  Both bounds must be page aligned and the range
+   non-empty. */
+
+static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
+{
+   aspacem_assert(start < end);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
+   init_nsegment(seg);
+   seg->kind  = SkResvn;
+   seg->start = start;
+   seg->end   = end;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Startup, including reading /proc/self/maps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Callback for parse_procselfmaps at startup: record one mapping
+   from the initial /proc/self/maps as a Valgrind-owned (SkAnonV or
+   SkFileV) segment. */
+static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
+                                 ULong dev, ULong ino, Off64T offset,
+                                 const UChar* filename )
+{
+   NSegment seg;
+   init_nsegment( &seg );
+   seg.start  = addr;
+   seg.end    = addr+len-1;
+   seg.dev    = dev;
+   seg.ino    = ino;
+   seg.offset = offset;
+   seg.hasR   = toBool(prot & VKI_PROT_READ);
+   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
+   seg.hasT   = False;
+
+   /* Don't use the presence of a filename to decide if a segment in
+      the initial /proc/self/maps to decide if the segment is an AnonV
+      or FileV segment as some systems don't report the filename. Use
+      the device and inode numbers instead. Fixes bug #124528. */
+   seg.kind = SkAnonV;
+   if (dev != 0 && ino != 0)
+      seg.kind = SkFileV;
+   if (filename)
+      seg.fnIdx = allocate_segname( filename );
+
+   if (0) show_nsegment( 2,0, &seg );
+   add_segment( &seg );
+}
+
+/* Initialise the address space manager, setting up the initial
+   segment list, and reading /proc/self/maps into it. This must
+   be called before any other function.
+
+   Takes a pointer to the SP at the time V gained control. This is
+   taken to be the highest usable address (more or less). Based on
+   that (and general consultation of tea leaves, etc) return a
+   suggested end address for the client's stack. */
+
+Addr VG_(am_startup) ( Addr sp_at_startup )
+{
+   NSegment seg;
+   Addr     suggested_clstack_top;
+
+   /* basic type-size sanity: the whole module assumes word ==
+      pointer == Addr == SizeT */
+   aspacem_assert(sizeof(Word)   == sizeof(void*));
+   aspacem_assert(sizeof(Addr)   == sizeof(void*));
+   aspacem_assert(sizeof(SizeT)  == sizeof(void*));
+   aspacem_assert(sizeof(SSizeT) == sizeof(void*));
+
+   /* Check that we can store the largest imaginable dev, ino and
+      offset numbers in an NSegment. */
+   aspacem_assert(sizeof(seg.dev)    == 8);
+   aspacem_assert(sizeof(seg.ino)    == 8);
+   aspacem_assert(sizeof(seg.offset) == 8);
+   aspacem_assert(sizeof(seg.mode)   == 4);
+
+   /* Add a single interval covering the entire address space. */
+   init_nsegment(&seg);
+   seg.kind        = SkFree;
+   seg.start       = Addr_MIN;
+   seg.end         = Addr_MAX;
+   nsegments[0]    = seg;
+   nsegments_used  = 1;
+
+   /* Establish address limits and block out unusable parts
+      accordingly. */
+
+   VG_(debugLog)(2, "aspacem",
+                    "        sp_at_startup = 0x%010llx (supplied)\n",
+                    (ULong)sp_at_startup );
+
+   aspacem_minAddr = (Addr) 0x04000000; // 64M
+
+#  if VG_WORDSIZE == 8
+   aspacem_maxAddr = (Addr)0x800000000 - 1; // 32G
+#    ifdef ENABLE_INNER
+     /* inner Valgrind: don't reach above the outer's stack */
+     { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
+       if (aspacem_maxAddr > cse)
+          aspacem_maxAddr = cse;
+     }
+#    endif
+#  else
+   aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
+#  endif
+
+   /* client mappings start at the bottom of the usable range; V's own
+      mappings start roughly half way up */
+   aspacem_cStart = aspacem_minAddr; // 64M
+   aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
+#  ifdef ENABLE_INNER
+   aspacem_vStart -= 0x10000000; // 256M
+#  endif
+
+   suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
+                                           + VKI_PAGE_SIZE;
+
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
+
+   VG_(debugLog)(2, "aspacem",
+                    "              minAddr = 0x%010llx (computed)\n",
+                    (ULong)aspacem_minAddr);
+   VG_(debugLog)(2, "aspacem",
+                    "              maxAddr = 0x%010llx (computed)\n",
+                    (ULong)aspacem_maxAddr);
+   VG_(debugLog)(2, "aspacem",
+                    "               cStart = 0x%010llx (computed)\n",
+                    (ULong)aspacem_cStart);
+   VG_(debugLog)(2, "aspacem",
+                    "               vStart = 0x%010llx (computed)\n",
+                    (ULong)aspacem_vStart);
+   VG_(debugLog)(2, "aspacem",
+                    "suggested_clstack_top = 0x%010llx (computed)\n",
+                    (ULong)suggested_clstack_top);
+
+   /* reserve everything below the client area and above the usable
+      max, so the advisory machinery never hands those out */
+   if (aspacem_cStart > Addr_MIN) {
+      init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
+      add_segment(&seg);
+   }
+   if (aspacem_maxAddr < Addr_MAX) {
+      init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
+      add_segment(&seg);
+   }
+
+   /* Create a 1-page reservation at the notional initial
+      client/valgrind boundary. This isn't strictly necessary, but
+      because the advisor does first-fit and starts searches for
+      valgrind allocations at the boundary, this is kind of necessary
+      in order to get it to start allocating in the right place. */
+   init_resvn(&seg, aspacem_vStart,  aspacem_vStart + VKI_PAGE_SIZE - 1);
+   add_segment(&seg);
+
+   VG_(am_show_nsegments)(2, "Initial layout");
+
+   VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
+   parse_procselfmaps( read_maps_callback, NULL );
+
+   VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
+
+   AM_SANITY_CHECK;
+   return suggested_clstack_top;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The core query-notify mechanism. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Query aspacem to ask where a mapping should go. */
+
+Addr VG_(am_get_advisory) ( MapRequest*  req,
+                            Bool         forClient,
+                            /*OUT*/Bool* ok )
+{
+   /* This function implements allocation policy.
+
+      The nature of the allocation request is determined by req, which
+      specifies the start and length of the request and indicates
+      whether the start address is mandatory, a hint, or irrelevant,
+      and by forClient, which says whether this is for the client or
+      for V.
+
+      Return values: the request can be vetoed (*ok is set to False),
+      in which case the caller should not attempt to proceed with
+      making the mapping. Otherwise, *ok is set to True, the caller
+      may proceed, and the preferred address at which the mapping
+      should happen is returned.
+
+      Note that this is an advisory system only: the kernel can in
+      fact do whatever it likes as far as placement goes, and we have
+      no absolute control over it.
+
+      Allocations will never be granted in a reserved area.
+
+      The Default Policy is:
+
+        Search the address space for two free intervals: one of them
+        big enough to contain the request without regard to the
+        specified address (viz, as if it was a floating request) and
+        the other being able to contain the request at the specified
+        address (viz, as if were a fixed request). Then, depending on
+        the outcome of the search and the kind of request made, decide
+        whether the request is allowable and what address to advise.
+
+      The Default Policy is overriden by Policy Exception #1:
+
+        If the request is for a fixed client map, we are prepared to
+        grant it providing all areas inside the request are either
+        free, reservations, or mappings belonging to the client. In
+        other words we are prepared to let the client trash its own
+        mappings if it wants to.
+
+      The Default Policy is overriden by Policy Exception #2:
+
+        If the request is for a hinted client map, we are prepared to
+        grant it providing all areas inside the request are either
+        free or reservations. In other words we are prepared to let
+        the client have a hinted mapping anywhere it likes provided
+        it does not trash either any of its own mappings or any of
+        valgrind's mappings.
+   */
+   Int  i, j;
+   Addr holeStart, holeEnd, holeLen;
+   Bool fixed_not_required;
+
+   /* search origin: client holes are looked for from cStart, V's own
+      from vStart */
+   Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
+
+   Addr reqStart = req->rkind==MAny ? 0 : req->start;
+   Addr reqEnd   = reqStart + req->len - 1;
+   Addr reqLen   = req->len;
+
+   /* These hold indices for segments found during search, or -1 if not
+      found. */
+   Int floatIdx = -1;
+   Int fixedIdx = -1;
+
+   aspacem_assert(nsegments_used > 0);
+
+   if (0) {
+      VG_(am_show_nsegments)(0,"getAdvisory");
+      /* NOTE(review): %lld paired with a ULong cast; %llu would match
+         the type -- harmless, and this print is disabled anyway */
+      VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
+                    (ULong)req->start, (ULong)req->len);
+   }
+
+   /* Reject zero-length requests */
+   if (req->len == 0) {
+      *ok = False;
+      return 0;
+   }
+
+   /* Reject wraparounds */
+   if ((req->rkind==MFixed || req->rkind==MHint)
+       && req->start + req->len < req->start) {
+      *ok = False;
+      return 0;
+   }
+
+   /* ------ Implement Policy Exception #1 ------ */
+
+   if (forClient && req->rkind == MFixed) {
+      Int  iLo   = find_nsegment_idx(reqStart);
+      Int  iHi   = find_nsegment_idx(reqEnd);
+      Bool allow = True;
+      for (i = iLo; i <= iHi; i++) {
+         if (nsegments[i].kind == SkFree
+             || nsegments[i].kind == SkFileC
+             || nsegments[i].kind == SkAnonC
+             || nsegments[i].kind == SkShmC
+             || nsegments[i].kind == SkResvn) {
+            /* ok */
+         } else {
+            allow = False;
+            break;
+         }
+      }
+      if (allow) {
+         /* Acceptable.  Granted. */
+         *ok = True;
+         return reqStart;
+      }
+      /* Not acceptable.  Fail. */
+      *ok = False;
+      return 0;
+   }
+
+   /* ------ Implement Policy Exception #2 ------ */
+
+   if (forClient && req->rkind == MHint) {
+      Int  iLo   = find_nsegment_idx(reqStart);
+      Int  iHi   = find_nsegment_idx(reqEnd);
+      Bool allow = True;
+      for (i = iLo; i <= iHi; i++) {
+         if (nsegments[i].kind == SkFree
+             || nsegments[i].kind == SkResvn) {
+            /* ok */
+         } else {
+            allow = False;
+            break;
+         }
+      }
+      if (allow) {
+         /* Acceptable.  Granted. */
+         *ok = True;
+         return reqStart;
+      }
+      /* Not acceptable.  Fall through to the default policy. */
+   }
+
+   /* ------ Implement the Default Policy ------ */
+
+   /* Don't waste time looking for a fixed match if not requested to. */
+   fixed_not_required = req->rkind == MAny;
+
+   i = find_nsegment_idx(startPoint);
+
+   /* Examine holes from index i back round to i-1.  Record the
+      index first fixed hole and the first floating hole which would
+      satisfy the request. */
+   for (j = 0; j < nsegments_used; j++) {
+
+      if (nsegments[i].kind != SkFree) {
+         i++;
+         if (i >= nsegments_used) i = 0;   /* wrap to the bottom */
+         continue;
+      }
+
+      holeStart = nsegments[i].start;
+      holeEnd   = nsegments[i].end;
+
+      /* Stay sane .. */
+      aspacem_assert(holeStart <= holeEnd);
+      aspacem_assert(aspacem_minAddr <= holeStart);
+      aspacem_assert(holeEnd <= aspacem_maxAddr);
+
+      /* See if it's any use to us. */
+      holeLen = holeEnd - holeStart + 1;
+
+      if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
+         fixedIdx = i;
+
+      if (floatIdx == -1 && holeLen >= reqLen)
+         floatIdx = i;
+
+      /* Don't waste time searching once we've found what we wanted. */
+      if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
+         break;
+
+      i++;
+      if (i >= nsegments_used) i = 0;
+   }
+
+   aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
+   if (fixedIdx >= 0)
+      aspacem_assert(nsegments[fixedIdx].kind == SkFree);
+
+   aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
+   if (floatIdx >= 0)
+      aspacem_assert(nsegments[floatIdx].kind == SkFree);
+
+   AM_SANITY_CHECK;
+
+   /* Now see if we found anything which can satisfy the request. */
+   switch (req->rkind) {
+      case MFixed:
+         if (fixedIdx >= 0) {
+            *ok = True;
+            return req->start;
+         } else {
+            *ok = False;
+            return 0;
+         }
+         break;
+      case MHint:
+         if (fixedIdx >= 0) {
+            *ok = True;
+            return req->start;
+         }
+         if (floatIdx >= 0) {
+            *ok = True;
+            return nsegments[floatIdx].start;
+         }
+         *ok = False;
+         return 0;
+      case MAny:
+         if (floatIdx >= 0) {
+            *ok = True;
+            return nsegments[floatIdx].start;
+         }
+         *ok = False;
+         return 0;
+      default:
+         break;
+   }
+
+   /*NOTREACHED*/
+   ML_(am_barf)("getAdvisory: unknown request kind");
+   *ok = False;
+   return 0;
+}
+
+/* Convenience wrapper for VG_(am_get_advisory) for client floating or
+   fixed requests. If start is zero, a floating request is issued; if
+   nonzero, a fixed request at that address is issued. Same comments
+   about return values apply. */
+
+Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
+                                          /*OUT*/Bool* ok )
+{
+   MapRequest mreq;
+   mreq.rkind = start==0 ? MAny : MFixed;   /* 0 => "anywhere" */
+   mreq.start = start;
+   mreq.len   = len;
+   return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
+}
+
+
+/* Notifies aspacem that the client completed an mmap successfully.
+   The segment array is updated accordingly. If the returned Bool is
+   True, the caller should immediately discard translations from the
+   specified address range. */
+
+Bool
+VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
+                            Int fd, Off64T offset )
+{
+   HChar    buf[VKI_PATH_MAX];
+   ULong    dev, ino;
+   UInt     mode;
+   NSegment seg;
+   Bool     needDiscard;
+
+   aspacem_assert(len > 0);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
+
+   /* Discard is needed if any of the just-trashed range had T. */
+   needDiscard = any_Ts_in_range( a, len );
+
+   init_nsegment( &seg );
+   seg.kind   = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
+   seg.start  = a;
+   seg.end    = a + len - 1;
+   seg.hasR   = toBool(prot & VKI_PROT_READ);
+   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
+   if (!(flags & VKI_MAP_ANONYMOUS)) {
+      // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
+      seg.offset = offset;
+      /* best-effort: identity/name lookups may fail, in which case
+         the segment simply stays unnamed / without dev+ino */
+      if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
+         seg.dev = dev;
+         seg.ino = ino;
+         seg.mode = mode;
+      }
+      if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+         seg.fnIdx = allocate_segname( buf );
+      }
+   }
+   add_segment( &seg );
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+/* Notifies aspacem that the client completed a shmat successfully.
+   The segment array is updated accordingly. If the returned Bool is
+   True, the caller should immediately discard translations from the
+   specified address range. */
+
+Bool
+VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
+{
+   NSegment seg;
+   Bool     needDiscard;
+
+   aspacem_assert(len > 0);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+   /* Discard is needed if any of the just-trashed range had T. */
+   needDiscard = any_Ts_in_range( a, len );
+
+   /* shared-memory attach: SkShmC, no backing file identity */
+   init_nsegment( &seg );
+   seg.kind   = SkShmC;
+   seg.start  = a;
+   seg.end    = a + len - 1;
+   seg.offset = 0;
+   seg.hasR   = toBool(prot & VKI_PROT_READ);
+   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
+   add_segment( &seg );
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+/* Notifies aspacem that an mprotect was completed successfully. The
+   segment array is updated accordingly. Note, as with
+   VG_(am_notify_munmap), it is not the job of this function to reject
+   stupid mprotects, for example the client doing mprotect of
+   non-client areas. Such requests should be intercepted earlier, by
+   the syscall wrapper for mprotect. This function merely records
+   whatever it is told. If the returned Bool is True, the caller
+   should immediately discard translations from the specified address
+   range. */
+
+Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
+{
+   Int  i, iLo, iHi;
+   Bool newR, newW, newX, needDiscard;
+
+   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+   if (len == 0)
+      return False;
+
+   newR = toBool(prot & VKI_PROT_READ);
+   newW = toBool(prot & VKI_PROT_WRITE);
+   newX = toBool(prot & VKI_PROT_EXEC);
+
+   /* Discard is needed if we're dumping X permission */
+   needDiscard = any_Ts_in_range( start, len ) && !newX;
+
+   split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
+
+   /* NOTE(review): these two calls recompute the indices that
+      split_nsegments_lo_and_hi just returned (its postconditions
+      guarantee the same values) -- redundant but harmless. */
+   iLo = find_nsegment_idx(start);
+   iHi = find_nsegment_idx(start + len - 1);
+
+   for (i = iLo; i <= iHi; i++) {
+      /* Apply the permissions to all relevant segments. */
+      switch (nsegments[i].kind) {
+         case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
+            nsegments[i].hasR = newR;
+            nsegments[i].hasW = newW;
+            nsegments[i].hasX = newX;
+            aspacem_assert(sane_NSegment(&nsegments[i]));
+            break;
+         default:
+            break;
+      }
+   }
+
+   /* Changing permissions could have made previously un-mergable
+      segments mergeable.  Therefore have to re-preen them. */
+   (void)preen_nsegments();
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+
+/* Notifies aspacem that an munmap completed successfully. The
+   segment array is updated accordingly. As with
+   VG_(am_notify_mprotect), we merely record the given info, and don't
+   check it for sensibleness. If the returned Bool is True, the
+   caller should immediately discard translations from the specified
+   address range.
+   (The cross-reference previously, and wrongly, named this function
+   itself.) */
+
+Bool VG_(am_notify_munmap)( Addr start, SizeT len )
+{
+   NSegment seg;
+   Bool     needDiscard;
+   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+   if (len == 0)
+      return False;
+
+   needDiscard = any_Ts_in_range( start, len );
+
+   init_nsegment( &seg );
+   seg.start = start;
+   seg.end   = start + len - 1;
+
+   /* The segment becomes unused (free).  Segments from above
+      aspacem_maxAddr were originally SkResvn and so we make them so
+      again.  Note, this isn't really right when the segment straddles
+      the aspacem_maxAddr boundary - then really it should be split in
+      two, the lower part marked as SkFree and the upper part as
+      SkResvn.  Ah well. */
+   if (start > aspacem_maxAddr
+       && /* check previous comparison is meaningful */
+          aspacem_maxAddr < Addr_MAX)
+      seg.kind = SkResvn;
+   else
+   /* Ditto for segments from below aspacem_minAddr. */
+   if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
+      seg.kind = SkResvn;
+   else
+      seg.kind = SkFree;
+
+   add_segment( &seg );
+
+   /* Unmapping could create two adjacent free segments, so a preen is
+      needed.  add_segment() will do that, so no need to here. */
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Handling mappings which do not arise directly from the ---*/
+/*--- simulation of the client. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* --- --- --- map, unmap, protect --- --- --- */
+
+/* Map a file at a fixed address for the client, and update the
+   segment array accordingly.  Fails (VKI_EINVAL) if the request is
+   malformed or the advisory machinery vetoes the placement; otherwise
+   performs the mmap, records the new SkFileC segment, and returns the
+   kernel's SysRes. */
+
+SysRes VG_(am_mmap_file_fixed_client)
+     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
+{
+   SysRes     sres;
+   NSegment   seg;
+   Addr       advised;
+   Bool       ok;
+   MapRequest req;
+   ULong      dev, ino;
+   UInt       mode;
+   HChar      buf[VKI_PATH_MAX];
+
+   /* Not allowable. */
+   if (length == 0
+       || !VG_IS_PAGE_ALIGNED(start)
+       || !VG_IS_PAGE_ALIGNED(offset))
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* Ask for an advisory.  If it's negative, fail immediately. */
+   req.rkind = MFixed;
+   req.start = start;
+   req.len   = length;
+   advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+   if (!ok || advised != start)
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* We have been advised that the mapping is allowable at the
+      specified address.  So hand it off to the kernel, and propagate
+      any resulting failure immediately. */
+   sres = VG_(am_do_mmap_NO_NOTIFY)(
+             start, length, prot,
+             VKI_MAP_FIXED|VKI_MAP_PRIVATE,
+             fd, offset
+          );
+   if (sres.isError)
+      return sres;
+
+   if (sres.res != start) {
+      /* I don't think this can happen.  It means the kernel made a
+         fixed map succeed but not at the requested location.  Try to
+         repair the damage, then return saying the mapping failed. */
+      (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+   }
+
+   /* Ok, the mapping succeeded.  Now notify the interval map. */
+   init_nsegment( &seg );
+   seg.kind   = SkFileC;
+   seg.start  = start;
+   seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
+   seg.offset = offset;
+   seg.hasR   = toBool(prot & VKI_PROT_READ);
+   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
+   /* best-effort file identity/name capture, as in
+      VG_(am_notify_client_mmap) */
+   if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
+      seg.dev = dev;
+      seg.ino = ino;
+      seg.mode = mode;
+   }
+   if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+      seg.fnIdx = allocate_segname( buf );
+   }
+   add_segment( &seg );
+
+   AM_SANITY_CHECK;
+   return sres;
+}
+
+
+/* Map anonymously at a fixed address for the client, and update
+ the segment array accordingly. */
+
+SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+
+ /* Not allowable. */
+ if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind = MFixed;
+ req.start = start;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+ if (!ok || advised != start)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ start, length, prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != start) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind = SkAnonC;
+ seg.start = start;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+
+/* Map anonymously at an unconstrained address for the client, and
+ update the segment array accordingly. */
+
+SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind = MAny;
+ req.start = 0;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ advised address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ advised, length, prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind = SkAnonC;
+ seg.start = advised;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+
+/* Similarly, acquire new address space for the client but with
+ considerable restrictions on what can be done with it: (1) the
+ actual protections may exceed those stated in 'prot', (2) the
+ area's protections cannot be later changed using any form of
+ mprotect, and (3) the area cannot be freed using any form of
+ munmap. On Linux this behaves the same as
+ VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
+ by using sbrk, so as to make use of large pages on AIX. */
+
+SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
+{
+ return VG_(am_mmap_anon_float_client) ( length, prot );
+}
+
+
+/* Map anonymously at an unconstrained address for V, and update the
+ segment array accordingly. This is fundamentally how V allocates
+ itself more address space when needed. */
+
+SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind = MAny;
+ req.start = 0;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ advised, length,
+ VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind = SkAnonV;
+ seg.start = advised;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR = True;
+ seg.hasW = True;
+ seg.hasX = True;
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+/* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
+
+void* VG_(am_shadow_alloc)(SizeT size)
+{
+ SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
+ return sres.isError ? NULL : (void*)sres.res;
+}
+
+/* Same comments apply as per VG_(am_sbrk_anon_float_client). On
+ Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
+
+SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
+{
+ return VG_(am_mmap_anon_float_valgrind)( cszB );
+}
+
+
+/* Map a file at an unconstrained address for V, and update the
+ segment array accordingly. This is used by V for transiently
+ mapping in object files to read their debug info. */
+
+SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
+ Int fd, Off64T offset )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+ ULong dev, ino;
+ UInt mode;
+ HChar buf[VKI_PATH_MAX];
+
+ /* Not allowable. */
+ if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately. */
+ req.rkind = MAny;
+ req.start = 0;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ advised, length, prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE,
+ fd, offset
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind = SkFileV;
+ seg.start = sres.res;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.offset = offset;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
+ seg.dev = dev;
+ seg.ino = ino;
+ seg.mode = mode;
+ }
+ if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+ seg.fnIdx = allocate_segname( buf );
+ }
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+
+/* --- --- munmap helper --- --- */
+
+static
+SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
+ Addr start, SizeT len, Bool forClient )
+{
+ Bool d;
+ SysRes sres;
+
+ if (!VG_IS_PAGE_ALIGNED(start))
+ goto eINVAL;
+
+ if (len == 0) {
+ *need_discard = False;
+ return VG_(mk_SysRes_Success)( 0 );
+ }
+
+ if (start + len < len)
+ goto eINVAL;
+
+ len = VG_PGROUNDUP(len);
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+ if (forClient) {
+ if (!VG_(am_is_valid_for_client_or_free_or_resvn)
+ ( start, len, VKI_PROT_NONE ))
+ goto eINVAL;
+ } else {
+ if (!is_valid_for_valgrind( start, len ))
+ goto eINVAL;
+ }
+
+ d = any_Ts_in_range( start, len );
+
+ sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
+ if (sres.isError)
+ return sres;
+
+ VG_(am_notify_munmap)( start, len );
+ AM_SANITY_CHECK;
+ *need_discard = d;
+ return sres;
+
+ eINVAL:
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+}
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for the client.
+ If *need_discard is True after a successful return, the caller
+ should immediately discard translations from the specified address
+ range. */
+
+SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
+ Addr start, SizeT len )
+{
+ return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
+}
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for valgrind. */
+
+SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
+{
+ Bool need_discard;
+ SysRes r = am_munmap_both_wrk( &need_discard,
+ start, len, False/*valgrind*/ );
+ /* If this assertion fails, it means we allowed translations to be
+ made from a V-owned section. Which shouldn't happen. */
+ if (!r.isError)
+ aspacem_assert(!need_discard);
+ return r;
+}
+
+/* Let (start,len) denote an area within a single Valgrind-owned
+ segment (anon or file). Change the ownership of [start, start+len)
+ to the client instead. Fails if (start,len) does not denote a
+ suitable segment. */
+
+Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
+{
+ Int i, iLo, iHi;
+
+ if (len == 0)
+ return True;
+ if (start + len < start)
+ return False;
+ if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
+ return False;
+
+ i = find_nsegment_idx(start);
+ if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
+ return False;
+ if (start+len-1 > nsegments[i].end)
+ return False;
+
+ aspacem_assert(start >= nsegments[i].start);
+ aspacem_assert(start+len-1 <= nsegments[i].end);
+
+ /* This scheme is like how mprotect works: split the to-be-changed
+ range into its own segment(s), then mess with them (it). There
+ should be only one. */
+ split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
+ aspacem_assert(iLo == iHi);
+ switch (nsegments[iLo].kind) {
+ case SkFileV: nsegments[iLo].kind = SkFileC; break;
+ case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
+ default: aspacem_assert(0); /* can't happen - guarded above */
+ }
+
+ preen_nsegments();
+ return True;
+}
+
+/* 'seg' must be NULL or have been obtained from
+ VG_(am_find_nsegment), and still valid. If non-NULL, and if it
+ denotes a SkAnonC (anonymous client mapping) area, set the .isCH
+ (is-client-heap) flag for that area. Otherwise do nothing.
+ (Bizarre interface so that the same code works for both Linux and
+ AIX and does not impose inefficiencies on the Linux version.) */
+void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
+{
+ Int i = segAddr_to_index( seg );
+ aspacem_assert(i >= 0 && i < nsegments_used);
+ if (nsegments[i].kind == SkAnonC) {
+ nsegments[i].isCH = True;
+ } else {
+ aspacem_assert(nsegments[i].isCH == False);
+ }
+}
+
+/* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
+ segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
+ segment. */
+void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
+{
+ Int i = segAddr_to_index( seg );
+ aspacem_assert(i >= 0 && i < nsegments_used);
+ if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
+ nsegments[i].hasT = True;
+ }
+}
+
+
+/* --- --- --- reservations --- --- --- */
+
+/* Create a reservation from START .. START+LENGTH-1, with the given
+ ShrinkMode. When checking whether the reservation can be created,
+ also ensure that at least abs(EXTRA) extra free bytes will remain
+ above (> 0) or below (< 0) the reservation.
+
+ The reservation will only be created if it, plus the extra-zone,
+ falls entirely within a single free segment. The returned Bool
+ indicates whether the creation succeeded. */
+
+Bool VG_(am_create_reservation) ( Addr start, SizeT length,
+ ShrinkMode smode, SSizeT extra )
+{
+ Int startI, endI;
+ NSegment seg;
+
+ /* start and end, not taking into account the extra space. */
+ Addr start1 = start;
+ Addr end1 = start + length - 1;
+
+ /* start and end, taking into account the extra space. */
+ Addr start2 = start1;
+ Addr end2 = end1;
+
+ if (extra < 0) start2 += extra; // this moves it down :-)
+ if (extra > 0) end2 += extra;
+
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
+
+ startI = find_nsegment_idx( start2 );
+ endI = find_nsegment_idx( end2 );
+
+ /* If the start and end points don't fall within the same (free)
+ segment, we're hosed. This does rely on the assumption that all
+ mergeable adjacent segments can be merged, but add_segment()
+ should ensure that. */
+ if (startI != endI)
+ return False;
+
+ if (nsegments[startI].kind != SkFree)
+ return False;
+
+ /* Looks good - make the reservation. */
+ aspacem_assert(nsegments[startI].start <= start2);
+ aspacem_assert(end2 <= nsegments[startI].end);
+
+ init_nsegment( &seg );
+ seg.kind = SkResvn;
+ seg.start = start1; /* NB: extra space is not included in the
+ reservation. */
+ seg.end = end1;
+ seg.smode = smode;
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/* Let SEG be an anonymous client mapping. This fn extends the
+ mapping by DELTA bytes, taking the space from a reservation section
+ which must be adjacent. If DELTA is positive, the segment is
+ extended forwards in the address space, and the reservation must be
+ the next one along. If DELTA is negative, the segment is extended
+ backwards in the address space and the reservation must be the
+ previous one. DELTA must be page aligned. abs(DELTA) must not
+ exceed the size of the reservation segment minus one page, that is,
+ the reservation segment after the operation must be at least one
+ page long. */
+
+Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
+ SSizeT delta )
+{
+ Int segA, segR;
+ UInt prot;
+ SysRes sres;
+
+ /* Find the segment array index for SEG. If the assertion fails it
+ probably means you passed in a bogus SEG. */
+ segA = segAddr_to_index( seg );
+ aspacem_assert(segA >= 0 && segA < nsegments_used);
+
+ if (nsegments[segA].kind != SkAnonC)
+ return False;
+
+ if (delta == 0)
+ return True;
+
+ prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
+ | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
+ | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
+
+ aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
+
+ if (delta > 0) {
+
+ /* Extending the segment forwards. */
+ segR = segA+1;
+ if (segR >= nsegments_used
+ || nsegments[segR].kind != SkResvn
+ || nsegments[segR].smode != SmLower
+ || nsegments[segR].start != nsegments[segA].end + 1
+ || delta + VKI_PAGE_SIZE
+ > (nsegments[segR].end - nsegments[segR].start + 1))
+ return False;
+
+ /* Extend the kernel's mapping. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ nsegments[segR].start, delta,
+ prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return False; /* kernel bug if this happens? */
+ if (sres.res != nsegments[segR].start) {
+ /* kernel bug if this happens? */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, delta );
+ return False;
+ }
+
+ /* Ok, success with the kernel. Update our structures. */
+ nsegments[segR].start += delta;
+ nsegments[segA].end += delta;
+ aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
+
+ } else {
+
+ /* Extending the segment backwards. */
+ delta = -delta;
+ aspacem_assert(delta > 0);
+
+ segR = segA-1;
+ if (segR < 0
+ || nsegments[segR].kind != SkResvn
+ || nsegments[segR].smode != SmUpper
+ || nsegments[segR].end + 1 != nsegments[segA].start
+ || delta + VKI_PAGE_SIZE
+ > (nsegments[segR].end - nsegments[segR].start + 1))
+ return False;
+
+ /* Extend the kernel's mapping. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ nsegments[segA].start-delta, delta,
+ prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return False; /* kernel bug if this happens? */
+ if (sres.res != nsegments[segA].start-delta) {
+ /* kernel bug if this happens? */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, delta );
+ return False;
+ }
+
+ /* Ok, success with the kernel. Update our structures. */
+ nsegments[segR].end -= delta;
+ nsegments[segA].start -= delta;
+ aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
+
+ }
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/* --- --- --- resizing/move a mapping --- --- --- */
+
+/* Let SEG be a client mapping (anonymous or file). This fn extends
+ the mapping forwards only by DELTA bytes, and trashes whatever was
+ in the new area. Fails if SEG is not a single client mapping or if
+ the new area is not accessible to the client. Fails if DELTA is
+ not page aligned. *seg is invalid after a successful return. If
+ *need_discard is True after a successful return, the caller should
+ immediately discard translations from the new area. */
+
+Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
+ NSegment* seg, SizeT delta )
+{
+ Addr xStart;
+ SysRes sres;
+ NSegment seg_copy = *seg;
+ SizeT seg_old_len = seg->end + 1 - seg->start;
+
+ if (0)
+ VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
+
+ if (seg->kind != SkFileC && seg->kind != SkAnonC)
+ return False;
+
+ if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
+ return False;
+
+ xStart = seg->end+1;
+ if (xStart + delta < delta)
+ return False;
+
+ if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
+ VKI_PROT_NONE ))
+ return False;
+
+ AM_SANITY_CHECK;
+ sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
+ seg_old_len,
+ seg_old_len + delta );
+ if (sres.isError) {
+ AM_SANITY_CHECK;
+ return False;
+ } else {
+ /* the area must not have moved */
+ aspacem_assert(sres.res == seg->start);
+ }
+
+ *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
+
+ seg_copy.end += delta;
+ add_segment( &seg_copy );
+
+ if (0)
+ VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/* Remap the old address range to the new address range. Fails if any
+ parameter is not page aligned, if the either size is zero, if any
+ wraparound is implied, if the old address range does not fall
+ entirely within a single segment, if the new address range overlaps
+ with the old one, or if the old address range is not a valid client
+ mapping. If *need_discard is True after a successful return, the
+ caller should immediately discard translations from both specified
+ address ranges. */
+
+Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
+ Addr old_addr, SizeT old_len,
+ Addr new_addr, SizeT new_len )
+{
+ Int iLo, iHi;
+ SysRes sres;
+ NSegment seg;
+
+ if (old_len == 0 || new_len == 0)
+ return False;
+
+ if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
+ || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
+ return False;
+
+ if (old_addr + old_len < old_addr
+ || new_addr + new_len < new_addr)
+ return False;
+
+ if (old_addr + old_len - 1 < new_addr
+ || new_addr + new_len - 1 < old_addr) {
+ /* no overlap */
+ } else
+ return False;
+
+ iLo = find_nsegment_idx( old_addr );
+ iHi = find_nsegment_idx( old_addr + old_len - 1 );
+ if (iLo != iHi)
+ return False;
+
+ if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
+ return False;
+
+ sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
+ ( old_addr, old_len, new_addr, new_len );
+ if (sres.isError) {
+ AM_SANITY_CHECK;
+ return False;
+ } else {
+ aspacem_assert(sres.res == new_addr);
+ }
+
+ *need_discard = any_Ts_in_range( old_addr, old_len )
+ || any_Ts_in_range( new_addr, new_len );
+
+ seg = nsegments[iLo];
+
+ /* Mark the new area based on the old seg. */
+ if (seg.kind == SkFileC) {
+ seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
+ } else {
+ aspacem_assert(seg.kind == SkAnonC);
+ aspacem_assert(seg.offset == 0);
+ }
+ seg.start = new_addr;
+ seg.end = new_addr + new_len - 1;
+ add_segment( &seg );
+
+ /* Create a free hole in the old location. */
+ init_nsegment( &seg );
+ seg.start = old_addr;
+ seg.end = old_addr + old_len - 1;
+ /* See comments in VG_(am_notify_munmap) about this SkResvn vs
+ SkFree thing. */
+ if (old_addr > aspacem_maxAddr
+ && /* check previous comparison is meaningful */
+ aspacem_maxAddr < Addr_MAX)
+ seg.kind = SkResvn;
+ else
+ seg.kind = SkFree;
+
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
+/*--- Almost completely independent of the stuff above. The ---*/
+/*--- only function it 'exports' to the code above this comment ---*/
+/*--- is parse_procselfmaps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Size of a smallish table used to read /proc/self/map entries. */
+#define M_PROCMAP_BUF 100000
+
+/* static ... to keep it out of the stack frame. */
+static Char procmap_buf[M_PROCMAP_BUF];
+
+/* Records length of /proc/self/maps read into procmap_buf. */
+static Int buf_n_tot;
+
+/* Helper fns. */
+
+static Int hexdigit ( Char c )
+{
+ if (c >= '0' && c <= '9') return (Int)(c - '0');
+ if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
+ if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
+ return -1;
+}
+
+static Int decdigit ( Char c )
+{
+ if (c >= '0' && c <= '9') return (Int)(c - '0');
+ return -1;
+}
+
+static Int readchar ( const Char* buf, Char* ch )
+{
+ if (*buf == 0) return 0;
+ *ch = *buf;
+ return 1;
+}
+
+static Int readhex ( const Char* buf, UWord* val )
+{
+ /* Read a word-sized hex number. */
+ Int n = 0;
+ *val = 0;
+ while (hexdigit(*buf) >= 0) {
+ *val = (*val << 4) + hexdigit(*buf);
+ n++; buf++;
+ }
+ return n;
+}
+
+static Int readhex64 ( const Char* buf, ULong* val )
+{
+ /* Read a potentially 64-bit hex number. */
+ Int n = 0;
+ *val = 0;
+ while (hexdigit(*buf) >= 0) {
+ *val = (*val << 4) + hexdigit(*buf);
+ n++; buf++;
+ }
+ return n;
+}
+
+static Int readdec64 ( const Char* buf, ULong* val )
+{
+ Int n = 0;
+ *val = 0;
+ while (hexdigit(*buf) >= 0) {
+ *val = (*val * 10) + decdigit(*buf);
+ n++; buf++;
+ }
+ return n;
+}
+
+
+/* Get the contents of /proc/self/maps into a static buffer. If
+ there's a syntax error, it won't fit, or other failure, just
+ abort. */
+
+static void read_procselfmaps_into_buf ( void )
+{
+ Int n_chunk;
+ SysRes fd;
+
+ /* Read the initial memory mapping from the /proc filesystem. */
+ fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
+ if (fd.isError)
+ ML_(am_barf)("can't open /proc/self/maps");
+
+ buf_n_tot = 0;
+ do {
+ n_chunk = ML_(am_read)( fd.res, &procmap_buf[buf_n_tot],
+ M_PROCMAP_BUF - buf_n_tot );
+ if (n_chunk >= 0)
+ buf_n_tot += n_chunk;
+ } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
+
+ ML_(am_close)(fd.res);
+
+ if (buf_n_tot >= M_PROCMAP_BUF-5)
+ ML_(am_barf_toolow)("M_PROCMAP_BUF");
+ if (buf_n_tot == 0)
+ ML_(am_barf)("I/O error on /proc/self/maps");
+
+ procmap_buf[buf_n_tot] = 0;
+}
+
+/* Parse /proc/self/maps. For each map entry, call
+ record_mapping, passing it, in this order:
+
+ start address in memory
+ length
+ page protections (using the VKI_PROT_* flags)
+ mapped file device and inode
+ offset in file, or zero if no file
+ filename, zero terminated, or NULL if no file
+
+ So the sig of the called fn might be
+
+ void (*record_mapping)( Addr start, SizeT size, UInt prot,
+ UInt dev, UInt info,
+ ULong foffset, UChar* filename )
+
+ Note that the supplied filename is transiently stored; record_mapping
+ should make a copy if it wants to keep it.
+
+ Nb: it is important that this function does not alter the contents of
+ procmap_buf!
+*/
+static void parse_procselfmaps (
+ void (*record_mapping)( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename ),
+ void (*record_gap)( Addr addr, SizeT len )
+ )
+{
+ Int i, j, i_eol;
+ Addr start, endPlusOne, gapStart;
+ UChar* filename;
+ UChar rr, ww, xx, pp, ch, tmp;
+ UInt prot;
+ UWord maj, min;
+ ULong foffset, dev, ino;
+
+ foffset = ino = 0; /* keep gcc-4.1.0 happy */
+
+ read_procselfmaps_into_buf();
+
+ aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
+
+ if (0)
+ VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
+
+ /* Ok, it's safely aboard. Parse the entries. */
+ i = 0;
+ gapStart = Addr_MIN;
+ while (True) {
+ if (i >= buf_n_tot) break;
+
+ /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
+ j = readhex(&procmap_buf[i], &start);
+ if (j > 0) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == '-') i += j; else goto syntaxerror;
+ j = readhex(&procmap_buf[i], &endPlusOne);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &rr);
+ if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &ww);
+ if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &xx);
+ if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
+ /* This field is the shared/private flag */
+ j = readchar(&procmap_buf[i], &pp);
+ if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
+ i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readhex64(&procmap_buf[i], &foffset);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readhex(&procmap_buf[i], &maj);
+ if (j > 0) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ':') i += j; else goto syntaxerror;
+ j = readhex(&procmap_buf[i], &min);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readdec64(&procmap_buf[i], &ino);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ goto read_line_ok;
+
+ syntaxerror:
+ VG_(debugLog)(0, "Valgrind:",
+ "FATAL: syntax error reading /proc/self/maps\n");
+ { Int k, m;
+ HChar buf50[51];
+ m = 0;
+ buf50[m] = 0;
+ k = i - 50;
+ if (k < 0) k = 0;
+ for (; k <= i; k++) {
+ buf50[m] = procmap_buf[k];
+ buf50[m+1] = 0;
+ if (m < 50-1) m++;
+ }
+ VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
+ }
+ ML_(am_exit)(1);
+
+ read_line_ok:
+
+ /* Try and find the name of the file mapped to this segment, if
+ it exists. Note that files can contains spaces. */
+
+ // Move i to the next non-space char, which should be either a '/' or
+ // a newline.
+ while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
+
+ // Move i_eol to the end of the line.
+ i_eol = i;
+ while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
+
+ // If there's a filename...
+ if (i < i_eol-1 && procmap_buf[i] == '/') {
+ /* Minor hack: put a '\0' at the filename end for the call to
+ 'record_mapping', then restore the old char with 'tmp'. */
+ filename = &procmap_buf[i];
+ tmp = filename[i_eol - i];
+ filename[i_eol - i] = '\0';
+ } else {
+ tmp = 0;
+ filename = NULL;
+ foffset = 0;
+ }
+
+ prot = 0;
+ if (rr == 'r') prot |= VKI_PROT_READ;
+ if (ww == 'w') prot |= VKI_PROT_WRITE;
+ if (xx == 'x') prot |= VKI_PROT_EXEC;
+
+ /* Linux has two ways to encode a device number when it
+ is exposed to user space (via fstat etc). The old way
+ is the traditional unix scheme that produces a 16 bit
+ device number with the top 8 being the major number and
+ the bottom 8 the minor number.
+
+ The new scheme allows for a 12 bit major number and
+ a 20 bit minor number by using a 32 bit device number
+ and putting the top 12 bits of the minor number into
+ the top 12 bits of the device number thus leaving an
+ extra 4 bits for the major number.
+
+ If the minor and major number are both single byte
+ values then both schemes give the same result so we
+ use the new scheme here in case either number is
+ outside the 0-255 range and then use fstat64 when
+ available (or fstat on 64 bit systems) so that we
+ should always have a new style device number and
+ everything should match. */
+ dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
+
+ if (record_gap && gapStart < start)
+ (*record_gap) ( gapStart, start-gapStart );
+
+ if (record_mapping && start < endPlusOne)
+ (*record_mapping) ( start, endPlusOne-start,
+ prot, dev, ino,
+ foffset, filename );
+
+ if ('\0' != tmp) {
+ filename[i_eol - i] = tmp;
+ }
+
+ i = i_eol + 1;
+ gapStart = endPlusOne;
+ }
+
+ if (record_gap && gapStart < Addr_MAX)
+ (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/.svn/text-base/priv_aspacemgr.h.svn-base b/coregrind/m_aspacemgr/.svn/text-base/priv_aspacemgr.h.svn-base
new file mode 100644
index 0000000..3ce2229
--- /dev/null
+++ b/coregrind/m_aspacemgr/.svn/text-base/priv_aspacemgr.h.svn-base
@@ -0,0 +1,130 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Module-local header file for m_aspacemgr. ---*/
+/*--- priv_aspacemgr.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PRIV_ASPACEMGR_H
+#define __PRIV_ASPACEMGR_H
+
+/* One of the important design goals of the address space manager is
+ to minimise dependence on other modules. Hence the following
+ minimal set of imports. */
+
+#include "pub_core_basics.h" // types
+#include "pub_core_vkiscnums.h" // system call numbers
+#include "pub_core_vki.h" // VKI_PAGE_SIZE, VKI_MREMAP_MAYMOVE,
+ // VKI_MREMAP_FIXED, vki_stat64
+
+#include "pub_core_debuglog.h" // VG_(debugLog)
+
+#include "pub_core_libcbase.h" // VG_(strlen), VG_(strcmp)
+ // VG_IS_PAGE_ALIGNED
+ // VG_PGROUNDDN, VG_PGROUNDUP
+
+#include "pub_core_syscall.h" // VG_(do_syscallN)
+ // VG_(mk_SysRes_Error)
+ // VG_(mk_SysRes_Success)
+
+#include "pub_core_options.h" // VG_(clo_sanity_level)
+
+#include "pub_core_aspacemgr.h" // self
+
+
+/* --------------- Implemented in aspacemgr-common.c ---------------*/
+
+/* Simple assert-like, file I/O and syscall facilities, which avoid
+ dependence on m_libcassert, and hence on the entire module graph.
+ This is important since most of the system itself depends on
+ aspacem, so we have to do this to avoid a circular dependency. */
+
+__attribute__ ((noreturn))
+extern void ML_(am_exit) ( Int status );
+extern void ML_(am_barf) ( HChar* what );
+extern void ML_(am_barf_toolow) ( HChar* what );
+
+__attribute__ ((noreturn))
+extern void ML_(am_assert_fail) ( const HChar* expr,
+ const Char* file,
+ Int line,
+ const Char* fn );
+
+#define aspacem_assert(expr) \
+ ((void) ((expr) ? 0 : \
+ (ML_(am_assert_fail)(#expr, \
+ __FILE__, __LINE__, \
+ __PRETTY_FUNCTION__))))
+
+/* Dude, what's my process ID ? */
+extern Int ML_(am_getpid)( void );
+
+/* A simple, self-contained sprintf implementation. */
+extern UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... );
+
+/* mmap et al wrappers */
+/* wrapper for munmap */
+extern SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length);
+
+/* wrapper for the ghastly 'mremap' syscall */
+extern SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
+ Addr old_addr,
+ SizeT old_len,
+ SizeT new_len
+ );
+/* ditto */
+extern SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
+ Addr old_addr, Addr old_len,
+ Addr new_addr, Addr new_len
+ );
+
+/* There is also VG_(do_mmap_NO_NOTIFY), but that's not declared
+ here (obviously). */
+
+extern SysRes ML_(am_open) ( const Char* pathname, Int flags, Int mode );
+extern void ML_(am_close) ( Int fd );
+extern Int ML_(am_read) ( Int fd, void* buf, Int count);
+extern Int ML_(am_readlink) ( HChar* path, HChar* buf, UInt bufsiz );
+
+/* Get the dev, inode and mode info for a file descriptor, if
+ possible. Returns True on success. */
+extern
+Bool ML_(am_get_fd_d_i_m)( Int fd,
+ /*OUT*/ULong* dev,
+ /*OUT*/ULong* ino, /*OUT*/UInt* mode );
+
+/* ------ Implemented seperately in aspacemgr-{linux,aix5}.c ------ */
+
+/* Do a sanity check (/proc/self/maps sync check) */
+extern void ML_(am_do_sanity_check)( void );
+
+
+#endif // __PRIV_ASPACEMGR_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/aspacemgr-aix5.c b/coregrind/m_aspacemgr/aspacemgr-aix5.c
new file mode 100644
index 0000000..ce529e5
--- /dev/null
+++ b/coregrind/m_aspacemgr/aspacemgr-aix5.c
@@ -0,0 +1,2641 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager: segment initialisation and ---*/
+/*--- tracking, stack operations ---*/
+/*--- ---*/
+/*--- Implementation for AIX5 m_aspacemgr-aix5.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+
+ Neither the names of the U.S. Department of Energy nor the
+ University of California nor the names of its contributors may be
+ used to endorse or promote products derived from this software
+ without prior written permission.
+*/
+
+/* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+ ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
+ AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
+ ************************************************************* */
+
+#include "priv_aspacemgr.h"
+
+
+/* Note: many of the exported functions implemented below are
+ described more fully in comments in pub_core_aspacemgr.h.
+*/
+
+/* This provides a minimal address space management facility for AIX5.
+ It is not as comprehensive, robust or efficient as its Linux
+ counterpart.
+
+ It does implement the advise/notify concept described in
+ aspacemgr-linux.c, but minimally. It only keeps track of the
+ mappings belonging to Valgrind; the client can do what it likes so
+ long as it doesn't trash Valgrind's mappings.
+
+ This is unfortunate, but the root problem is that it is impossible
+ to find out on AIX what the complete set of mappings for a process
+ is. Sure, AIX does have /proc/pid/map, but it's weak compared to
+ Linux's: it just shows some small subset of the mappings, not all
+ of them. So it is not very useful: it can't be used to discover
+ the true initial process mapping state, and it can't be used to
+ cross-check Valgrind's internal mapping table, as is done at
+ --sanity-level=3 and above on Linux.
+*/
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The Address Space Manager's state. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Describes AIX5-specific segment kinds */
+typedef
+ enum {
+ ASkFree=1, // free space
+ ASkMText, // module text (code) mapping
+ ASkMData, // module data (& bss) mapping
+ ASkFileV, // file mapping belonging to valgrind
+ ASkAnonC, // anonymous mapping belonging to the client
+ ASkAnonV, // anonymous mapping belonging to valgrind
+ ASkShmemC, // shm mapping belonging to the client
+ ASkPreAlloc // area preallocated from sbrk
+ }
+ AixSegKind;
+
+/* Segment table entries, in summary:
+
+ ASkFree start end
+ ASkMText start end r w x sibling ismainexe fname mname
+ ASkMData start end r w x sibling
+ FileV start end r w x fname offset
+ AnonC start end r w x fromP isCH
+ AnonV start end r w x fromP
+ ShmemC start end r w x
+ PreAlloc start end
+
+ Entries are non-overlapping and cover the entire address space
+ exactly (as in the Linux aspacem). Unlike Linux there are no
+ alignment constraints, since we're just recording what's going on,
+ rather than controlling it.
+
+ MText/MData are XCOFF mapped modules, as determined by looking at
+ /proc/../map. MText is the primary entry and contains the text
+ range. MData contains the data range, if the module has a data
+ mapping (usually but not always). MText also holds the avma of the
+ corresponding data segment start, if any, (sibling field) so it can
+ be found and the two added/removed together. Similarly MData
+ contains the address of the corresponding MText (also sibling).
+
+ fname/mname only apply to MText. To find the fname/mname for MData
+ you have to look at the corresponding MText entry, which is
+ guaranteed to exist. MText may exist without a corresponding MData
+ but not vice versa. Kludge: in fact fname/mname have to be
+ allowed in MData, else read_procselfmap doesn't work.
+
+ MText may have a zero sibling pointer, indicating that there is no
+ corresponding MData. But MData must have a nonzero sibling pointer
+ since MData without MText is not allowed. Implication is that
+ neither MText nor MData may be mapped at zero as this would mess up
+ the representation, but I don't think that will ever happen since
+ AIX uses page zero as a readonly const-zero area.
+
+ For MData entries, the data section size acquired from /proc/../map
+ appears to also include the bss, so there is no need for any
+ further handling of that.
+
+ isCH indicates whether an AnonC area is part of the client heap
+ or not. May not be set for any other kind of area.
+
+ File and member names are entries into the string table.
+
+ fromP, for AnonC/AnonV, if True, indicates that the segment was
+ allocated from a PreAlloc area, and so should be returned to that
+ state upon deallocation. If False, indicates that the segment
+ should be unmapped on deallocation.
+*/
+typedef
+ struct {
+ AixSegKind kind;
+
+ /* ALL: extent */
+ /* Note: zero-length segments are not allowed. That guarantees
+ that start <= end. */
+ Addr start; // lowest addr in range (ALL)
+ Addr end; // highest addr in range (ALL)
+
+ /* ALL except Free */
+ Bool hasR;
+ Bool hasW;
+ Bool hasX;
+
+ /* misc */
+ Addr sibling; // MText, MData only: addr of MData/MText
+ Bool isMainExe; // MText only: is this the main executable?
+ Bool isCH; // AnonC only: is this part of the client's heap?
+ Bool fromP; // AnonC, AnonV only: originated from PreAlloc?
+ UChar* fname; // MText, FileV only: filename
+ UChar* mname; // MText only: member name if present
+ Off64T offset; // FileV only: file offset
+ }
+ AixSegment;
+
+
+#define VG_N_ASEGMENTS 5000
+
+typedef
+ struct {
+ AixSegment seg[VG_N_ASEGMENTS];
+ Int used;
+ }
+ AixSegments;
+
+
+/* ------ start of STATE for the address-space manager ------ */
+
+/* A table of zero-terminated strings (file names etc). This
+ is only ever added to. */
+
+#define VG_N_ASTRTAB 200000
+static Int strtab_used = 0;
+static UChar strtab[VG_N_ASTRTAB];
+
+#define Addr_MIN ((Addr)0)
+#define Addr_MAX ((Addr)(-1ULL))
+
+/* The main array of AixSegments, in order as required. */
+
+static AixSegments asegs_pri;
+
+/* and two auxiliary arrays. */
+
+static AixSegments asegs_tnew;
+static AixSegments asegs_told;
+
+/* The assumed size of the main thread's stack, so that we can add a
+ segment for it at startup. */
+
+#define N_FAKE_STACK_PAGES_MIN 4096 /* 16M fake stack */ /* default size */
+#define N_FAKE_STACK_PAGES_MAX 32768 /* 128M fake stack */ /* max size? */
+
+
+/* Hacks which are probably for AIX 'millicode'. Note: ensure
+ these stay page aligned. */
+
+#define MAGIC_PAGES_1_BASE 0x3000
+#define MAGIC_PAGES_1_SIZE (2*0x1000)
+
+#define MAGIC_PAGES_2_BASE 0xC000
+#define MAGIC_PAGES_2_SIZE (4*0x1000)
+
+
+#define AM_SANITY_CHECK(_who) \
+ do { \
+ if (VG_(clo_sanity_level >= 3)) { \
+ Bool ok = sane_AixSegments(&asegs_pri); \
+ if (!ok) \
+ VG_(debugLog)(0,"aspace", "sanity check failed, " \
+ "who = %s\n", _who); \
+ aspacem_assert(ok); \
+ } \
+ } while (0)
+
+/* When preallocating a block from sbrk-world, how much extra
+ should we pre-emptively acquire? */
+
+//#define AM_PREALLOC_EXTRA (512 * 1024)
+//#define AM_PREALLOC_EXTRA 0x0800000 /* 8 M */
+#define AM_PREALLOC_EXTRA 0x4000000 /* 64 M */
+
+/* The AIX5 aspacem implementation needs to be told when it is and
+ isn't allowed to use sbrk to allocate memory. Hence: */
+Bool VG_(am_aix5_sbrk_allowed) = True;
+
+/* ------ end of STATE for the address-space manager ------ */
+
+/* ------ Forwards decls ------ */
+static void parse_procselfmap ( /*OUT*/ AixSegments* );
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Stuff for 4K (small-page-size) rounding. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+#define AM_4K_PAGESZ 4096
+
+static Bool AM_IS_4K_ALIGNED ( UWord w )
+{
+ UWord m = AM_4K_PAGESZ-1;
+ return toBool( (w & m) == 0 );
+}
+
+static UWord AM_4K_ROUNDUP ( UWord w )
+{
+ UWord m = AM_4K_PAGESZ-1;
+ return (w+m) & (~m);
+}
+
+static UWord AM_64K_ROUNDUP ( UWord w )
+{
+ UWord m = 0x10000-1;
+ return (w+m) & (~m);
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- String table management. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Add the given string into the string table (or find an existing
+ copy of it) and return a pointer to the in-table version. The
+ pointer will be valid for the entire rest of the run. */
+
+static UChar* add_to_strtab ( UChar* str )
+{
+ Int off, len;
+ /* First, look for the string. */
+ off = 0;
+ while (off < strtab_used) {
+ if (0 == VG_(strcmp)(str, &strtab[off]))
+ return &strtab[off];
+ off += VG_(strlen)(&strtab[off]) + 1;
+ }
+ /* not present? we'll have to copy it then. */
+ len = VG_(strlen)(str);
+ if (len + 1 + strtab_used > VG_N_ASTRTAB)
+ ML_(am_barf_toolow)("VG_N_ASTRTAB");
+ off = strtab_used;
+ for (; *str; str++)
+ strtab[strtab_used++] = *str;
+ strtab[strtab_used++] = 0;
+ aspacem_assert(strtab_used <= VG_N_ASTRTAB);
+ return &strtab[off];
+}
+
+
+static Bool is_in_strtab ( UChar* str )
+{
+ if (str < &strtab[0])
+ return False;
+ if (str >= &strtab[strtab_used])
+ return False;
+ if (str > &strtab[0] && str[-1] != 0)
+ return False;
+ return True;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Low level AixSegment stuff. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+static void init_AixSegment ( AixSegment* s )
+{
+ s->kind = 0; /* invalid */
+ s->start = 0;
+ s->end = 0;
+ s->hasR = False;
+ s->hasW = False;
+ s->hasX = False;
+ s->sibling = 0;
+ s->isMainExe = False;
+ s->isCH = False;
+ s->fromP = False;
+ s->fname = NULL;
+ s->mname = NULL;
+ s->offset = 0;
+}
+
+
+static HChar* name_of_AixSegKind ( AixSegKind sk )
+{
+ switch (sk) {
+ case ASkFree: return "Free ";
+ case ASkMText: return "MText";
+ case ASkMData: return "MData";
+ case ASkAnonV: return "AnonV";
+ case ASkAnonC: return "AnonC";
+ case ASkFileV: return "FileV";
+ case ASkShmemC: return "ShmC ";
+ case ASkPreAlloc: return "PreAl";
+ default: ML_(am_barf)("name_of_AixSegKind");
+ /*NOTREACHED*/
+ return NULL;
+ }
+}
+
+
+static
+void show_AixSegment ( Int logLevel, Int segNo, AixSegment* seg )
+{
+ HChar* segName = name_of_AixSegKind( seg->kind );
+ switch (seg->kind) {
+ case ASkFree:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx\n",
+ segNo, /*segName*/" ",
+ (ULong)seg->start, (ULong)seg->end
+ );
+ break;
+ case ASkMText:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- (d %010llx) %s%s%s%s\n",
+ segNo, seg->isMainExe ? "MTEXT" : "MText",
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ (ULong)seg->sibling,
+ seg->fname,
+ seg->mname ? "(" : "",
+ seg->mname ? (HChar*)seg->mname : "",
+ seg->mname ? ")" : ""
+ );
+ break;
+ case ASkMData:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- (t %010llx)\n",
+ segNo, "MData",
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ (ULong)seg->sibling
+ );
+ break;
+ case ASkFileV:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- %6lld %s\n",
+ segNo, segName,
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ seg->offset,
+ seg->fname
+ );
+ break;
+ case ASkAnonV:
+ case ASkAnonC:
+ case ASkShmemC:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c%c%c\n",
+ segNo, segName,
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ seg->kind==ASkAnonC && seg->isCH ? 'H' : '-',
+ seg->fromP ? 'P' : '-'
+ );
+ break;
+ case ASkPreAlloc:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %c%c%c-- (size %llu)\n",
+ segNo, segName,
+ (ULong)seg->start, (ULong)seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-',
+ (ULong)seg->end - (ULong)seg->start + 1
+ );
+ break;
+ default:
+ VG_(debugLog)(logLevel, "aspacem",
+ "%3d: show_AixSegment: unknown segment\n",
+ segNo);
+ break;
+ }
+}
+
+
+static void init_AixSegments ( AixSegments* segs )
+{
+ segs->used = 1;
+ init_AixSegment( &segs->seg[0] );
+ segs->seg[0].kind = ASkFree;
+ segs->seg[0].start = Addr_MIN;
+ segs->seg[0].end = Addr_MAX;
+}
+
+
+static
+void show_AixSegments ( Int logLevel, HChar* who, AixSegments* segs )
+{
+ Int i;
+ VG_(debugLog)(logLevel, "aspacem", "<<< %s\n", who);
+ for (i = 0; i < segs->used; i++)
+ show_AixSegment( logLevel, i, &segs->seg[i] );
+ VG_(debugLog)(logLevel, "aspacem", ">>>\n");
+}
+
+
+static Bool sane_AixSegment ( AixSegment* seg )
+{
+ /* disallow zero and negative length segments */
+ if (seg->end < seg->start)
+ return False;
+
+ switch (seg->kind) {
+ case ASkFree:
+ if (seg->hasR || seg->hasW || seg->hasX)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0 || seg->offset != 0)
+ return False;
+ if (seg->fname || seg->mname)
+ return False;
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkMText:
+ if (!is_in_strtab(seg->fname))
+ return False;
+ if (seg->mname && !is_in_strtab(seg->mname))
+ return False;
+ if (seg->offset != 0)
+ return False;
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkMData:
+ if (seg->isMainExe || seg->sibling == 0 || seg->offset != 0)
+ return False;
+ /* fname/mname have to be allowed in MData, else
+ read_procselfmap doesn't work. Unfortunately. */
+ /*
+ if (seg->fname || seg->mname)
+ return False;
+ */
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkFileV:
+ if (!is_in_strtab(seg->fname))
+ return False;
+ if (seg->mname != NULL)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0)
+ return False;
+ if (seg->isCH || seg->fromP)
+ return False;
+ break;
+ case ASkShmemC:
+ case ASkAnonV:
+ case ASkAnonC:
+ if (seg->fname || seg->mname)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0)
+ return False;
+ if (seg->offset != 0)
+ return False;
+ if (seg->kind != ASkAnonC && seg->isCH)
+ return False;
+ if ( (!(seg->kind == ASkAnonV || seg->kind == ASkAnonC))
+ && seg->fromP)
+ return False;
+ break;
+ case ASkPreAlloc:
+ if (seg->fname || seg->mname)
+ return False;
+ if (seg->isMainExe || seg->sibling != 0)
+ return False;
+ if (seg->offset != 0)
+ return False;
+ if (seg->kind != ASkAnonC && seg->isCH)
+ return False;
+ if (seg->fromP)
+ return False;
+ if (!AM_IS_4K_ALIGNED(seg->start))
+ return False;
+ if (!AM_IS_4K_ALIGNED(seg->end + 1))
+ return False;
+ if (!(seg->hasR && seg->hasW && seg->hasX))
+ return False;
+ break;
+ default:
+ return False;
+ }
+ return True;
+}
+
+
+/* Binary search the interval array for a given address. Since the
+ array covers the entire address space the search cannot fail. */
+static Int find_asegment_idx ( AixSegments* segs, Addr a )
+{
+ Addr a_mid_lo, a_mid_hi;
+ Int mid,
+ lo = 0,
+ hi = segs->used-1;
+ aspacem_assert(lo <= hi);
+ while (True) {
+ /* current unsearched space is from lo to hi, inclusive. */
+ if (lo > hi) {
+ /* Not found. This can't happen. */
+ ML_(am_barf)("find_nsegment_idx: not found");
+ }
+ mid = (lo + hi) / 2;
+ a_mid_lo = segs->seg[mid].start;
+ a_mid_hi = segs->seg[mid].end;
+
+ if (a < a_mid_lo) { hi = mid-1; continue; }
+ if (a > a_mid_hi) { lo = mid+1; continue; }
+ aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
+ aspacem_assert(0 <= mid && mid < segs->used);
+ return mid;
+ }
+}
+
+
+static Bool sane_AixSegments ( AixSegments* segs )
+{
+ Int i;
+
+ /* Check endpoints */
+ if (segs->used < 1 || segs->used > VG_N_ASEGMENTS) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad ->used");
+ return False;
+ }
+ if (segs->seg[0].start != Addr_MIN
+ || segs->seg[segs->used-1].end != Addr_MAX) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad endpoints");
+ return False;
+ }
+
+ /* Check each segment, and check entire range is covered. */
+ for (i = 0; i < segs->used; i++) {
+ if (!sane_AixSegment( &segs->seg[i] )) {
+ VG_(debugLog)(0, "aspacem",
+ "sane_AixSegments: bad segment %d\n", i);
+ return False;
+ }
+ }
+ for (i = 1; i < segs->used; i++) {
+ if (segs->seg[i-1].end + 1 != segs->seg[i].start) {
+ VG_(debugLog)(0, "aspacem",
+ "sane_AixSegments: bad transition at %d/%d\n", i-1,i);
+ return False;
+ }
+ }
+
+ /* Now we know 'seg' is safe for use in find_asegment_idx().
+ Check the sibling pointers for MText/MData.
+
+ Also check that the segment starting at address zero is neither
+ MText nor MData (since this would mess up the sibling pointer
+ representation; see comments above.) Failure of this is not per
+ se a logic failure, but it does indicate that the kernel
+ unexpectedly placed MText or MData at zero, and our
+ representation is therefore inadequate.
+ */
+ if (segs->seg[0].kind == ASkMText || segs->seg[0].kind == ASkMData) {
+ VG_(debugLog)(0, "aspacem",
+ "sane_AixSegments: ASkMText/ASkMData at address zero\n");
+ return False;
+ }
+
+ for (i = 0; i < segs->used-1; i++) {
+
+ AixSegment *s1, *s2;
+
+ s1 = &segs->seg[i];
+
+ if (s1->kind == ASkMData) {
+ s2 = &segs->seg[ find_asegment_idx(segs, s1->sibling) ];
+ if (s2->kind != ASkMText
+ || find_asegment_idx(segs, s2->sibling) != i) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad sibling "
+ "link(s) for ASkData\n");
+ return False;
+ }
+ }
+
+ if (s1->kind == ASkMText && s1->sibling != 0) {
+ s2 = &segs->seg[ find_asegment_idx(segs, s1->sibling) ];
+ if (s2->kind != ASkMData
+ || find_asegment_idx(segs, s2->sibling) != i) {
+ VG_(debugLog)(0, "aspacem", "sane_AixSegments: bad sibling "
+ "link(s) for ASkText\n");
+ return False;
+ }
+ }
+
+ }
+
+ return True;
+}
+
+
+/* Try merging s2 into s1, if possible. If successful, s1 is
+ modified, and True is returned. Otherwise s1 is unchanged and
+ False is returned. */
+
+static Bool maybe_merge_asegments ( AixSegment* s1, AixSegment* s2 )
+{
+ if (s1->kind != s2->kind)
+ return False;
+
+ if (s1->end+1 != s2->start)
+ return False;
+
+ switch (s1->kind) {
+
+ case ASkFree:
+ s1->end = s2->end;
+ return True;
+
+ case ASkAnonC:
+ case ASkAnonV:
+ if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
+ && s1->hasX == s2->hasX && s1->isCH == s2->isCH
+ && s1->fromP == s2->fromP) {
+ s1->end = s2->end;
+ return True;
+ }
+ break;
+
+ /* not really necessary, but .. */
+ case SkFileV:
+ if (s1->hasR == s2->hasR
+ && s1->hasW == s2->hasW && s1->hasX == s2->hasX
+ && s1->fname == s2->fname
+ && s2->offset == s1->offset
+ + ((ULong)s2->start) - ((ULong)s1->start) ) {
+ s1->end = s2->end;
+ return True;
+ }
+ break;
+
+ /* it's important to merge PreAlloc's back together to avoid
+ fragmenting PreAlloc'd space unnecessarily */
+ case ASkPreAlloc:
+ s1->end = s2->end;
+ return True;
+
+ default:
+ break;
+ }
+
+ return False;
+}
+
+
+/* Merge mergable segments in SEGS. */
+
+static void preen_asegments ( AixSegments* segs )
+{
+ Int r, w;
+
+ aspacem_assert(segs->used >= 1);
+ if (segs->used == 1)
+ return;
+
+ w = 0;
+ for (r = 1; r < segs->used; r++) {
+ if (maybe_merge_asegments(&segs->seg[w], &segs->seg[r])) {
+ /* nothing */
+ } else {
+ w++;
+ if (w != r)
+ segs->seg[w] = segs->seg[r];
+ }
+ }
+ w++;
+ aspacem_assert(w > 0 && w <= segs->used);
+ segs->used = w;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Modifying a segment array, and constructing segments. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Split the segment containing 'a' into two, so that 'a' is
+ guaranteed to be the start of a new segment. If 'a' is already the
+ start of a segment, do nothing. */
+
+static void split_asegment_at ( AixSegments* segs, Addr a )
+{
+ Int i, j;
+
+ aspacem_assert(a > 0);
+ aspacem_assert(segs->used >= 1);
+
+ i = find_asegment_idx(segs, a);
+ aspacem_assert(i >= 0 && i < segs->used);
+
+ if (segs->seg[i].start == a)
+ /* 'a' is already the start point of a segment, so nothing to be
+ done. */
+ return;
+
+ /* else we have to slide the segments upwards to make a hole */
+ if (segs->used >= VG_N_ASEGMENTS)
+ ML_(am_barf_toolow)("VG_N_ASEGMENTS");
+ for (j = segs->used-1; j > i; j--)
+ segs->seg[j+1] = segs->seg[j];
+ segs->used++;
+
+ segs->seg[i+1] = segs->seg[i];
+ segs->seg[i+1].start = a;
+ segs->seg[i].end = a-1;
+
+ if (segs->seg[i].kind == ASkFileV /* || segs->seg[i].kind == ASkFileC*/)
+ segs->seg[i+1].offset
+ += ((ULong)segs->seg[i+1].start) - ((ULong)segs->seg[i].start);
+
+ aspacem_assert(sane_AixSegment(&segs->seg[i]));
+ aspacem_assert(sane_AixSegment(&segs->seg[i+1]));
+}
+
+
+/* Do the minimum amount of segment splitting necessary to ensure that
+ sLo is the first address denoted by some segment and sHi is the
+ highest address denoted by some other segment. Returns the indices
+ of the lowest and highest segments in the range. */
+
+static
+void split_asegments_lo_and_hi ( AixSegments* segs,
+ Addr sLo, Addr sHi,
+ /*OUT*/Int* iLo,
+ /*OUT*/Int* iHi )
+{
+ aspacem_assert(sLo < sHi);
+
+ if (sLo > 0)
+ split_asegment_at(segs, sLo);
+ if (sHi < Addr_MAX)
+ split_asegment_at(segs, sHi+1);
+
+ *iLo = find_asegment_idx(segs,sLo);
+ *iHi = find_asegment_idx(segs,sHi);
+ aspacem_assert(0 <= *iLo && *iLo < segs->used);
+ aspacem_assert(0 <= *iHi && *iHi < segs->used);
+ aspacem_assert(*iLo <= *iHi);
+ aspacem_assert(segs->seg[*iLo].start == sLo);
+ aspacem_assert(segs->seg[*iHi].end == sHi);
+ /* Not that I'm overly paranoid or anything, definitely not :-) */
+}
+
+
+/* Add SEG to the collection, deleting/truncating any it overlaps.
+ This deals with all the tricky cases of splitting up segments as
+ needed. Contents of SEG are copied. */
+
+static void add_asegment ( AixSegments* segs, AixSegment* seg )
+{
+ Int i, iLo, iHi, delta;
+ Bool segment_is_sane;
+
+ Addr sStart = seg->start;
+ Addr sEnd = seg->end;
+
+ aspacem_assert(sStart <= sEnd);
+
+ segment_is_sane = sane_AixSegment(seg);
+ if (!segment_is_sane) show_AixSegment(0,0,seg);
+ aspacem_assert(segment_is_sane);
+
+ split_asegments_lo_and_hi( segs, sStart, sEnd, &iLo, &iHi );
+
+ /* Now iLo .. iHi inclusive is the range of segment indices which
+ seg will replace. If we're replacing more than one segment,
+ slide those above the range down to fill the hole. */
+ delta = iHi - iLo;
+ aspacem_assert(delta >= 0);
+ if (delta > 0) {
+ for (i = iLo; i < segs->used-delta; i++)
+ segs->seg[i] = segs->seg[i+delta];
+ segs->used -= delta;
+ }
+ aspacem_assert(segs->used >= 1);
+
+ segs->seg[iLo] = *seg;
+
+ preen_asegments(segs);
+ if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
+}
+
+
+/* Convert everything in SEG except MData and MText into Free,
+ then preen, so as to retain normalised form. */
+
+static void knockout_non_module_segs ( AixSegments* segs )
+{
+ Int i;
+ Addr s, e;
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind == ASkFree
+ || segs->seg[i].kind == ASkMText
+ || segs->seg[i].kind == ASkMData)
+ continue;
+ s = segs->seg[i].start;
+ e = segs->seg[i].end;
+ init_AixSegment( &segs->seg[i] );
+ segs->seg[i].start = s;
+ segs->seg[i].end = e;
+ segs->seg[i].kind = ASkFree;
+ }
+ preen_asegments(segs);
+ aspacem_assert( sane_AixSegments(segs) );
+}
+
+
+/* Copy a segment array. */
+
+static void copy_asegments_d_s ( AixSegments* dst, AixSegments* src )
+{
+ Int i;
+ aspacem_assert(src->used >= 1 && src->used < VG_N_ASEGMENTS);
+ dst->used = src->used;
+ for (i = 0; i < src->used; i++)
+ dst->seg[i] = src->seg[i];
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Re-reading /proc/../map and updating MText/MData segments ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Find out the size of the AixCodeSegChange that must be
+ presented to VG_(am_aix5_reread_procmap). */
+
+Int VG_(am_aix5_reread_procmap_howmany_directives)(void)
+{
+ /* In the worst imaginable case, all the tracked modules could have
+ disappeared and been replaced with different ones. Hence: */
+ return 2 * VG_N_ASEGMENTS;
+}
+
+
+static
+void add_pri_text_and_data_segs ( AixSegment* tnew, AixSegment* dnew )
+{
+ Bool dExists = (dnew->end - dnew->start + 1) != 0;
+ aspacem_assert(tnew->kind == ASkMText);
+ aspacem_assert(dnew->kind == ASkMData);
+ if (dExists) {
+ aspacem_assert(tnew->sibling == dnew->start);
+ aspacem_assert(dnew->sibling == tnew->start);
+ add_asegment(&asegs_pri, tnew);
+ add_asegment(&asegs_pri, dnew);
+ } else {
+ aspacem_assert(tnew->sibling == 0);
+ add_asegment(&asegs_pri, tnew);
+ }
+}
+
+static
+void del_pri_text_and_data_segs ( AixSegment* told, AixSegment* dold )
+{
+ AixSegment fre;
+ Bool dExists = (dold->end - dold->start + 1) != 0;
+ aspacem_assert(told->kind == ASkMText);
+ aspacem_assert(dold->kind == ASkMData);
+ init_AixSegment( &fre );
+ fre.kind = ASkFree;
+ if (dExists) {
+ aspacem_assert(told->sibling == dold->start);
+ aspacem_assert(dold->sibling == told->start);
+ fre.start = told->start;
+ fre.end = told->end;
+ add_asegment(&asegs_pri, &fre);
+ fre.start = dold->start;
+ fre.end = dold->end;
+ add_asegment(&asegs_pri, &fre);
+ } else {
+ aspacem_assert(told->sibling == 0);
+ fre.start = told->start;
+ fre.end = told->end;
+ add_asegment(&asegs_pri, &fre);
+ }
+}
+
+
+/* Tell aspacem that /proc/<pid>/map may have changed (eg following
+ __loadx) and so it should be re-read, and the code/data segment
+ list updated accordingly. The resulting array of AixCodeChangeSeg
+ directives are written to 'directives', and the number of entries
+ to *ndirectives. */
+
+/* See the comment immediately above for the contract. 'directives'
+ must have room for at least 2 * VG_N_ASEGMENTS entries (see the
+ asserts on *ndirectives below). */
+void VG_(am_aix5_reread_procmap)
+ ( /*OUT*/AixCodeSegChange* directives, /*OUT*/Int* ndirectives )
+{
+ Int ixold, ixnew;
+ Bool done_old, done_new;
+ AixSegment *olds, *news;
+
+ /* First, read /proc/../map into asegs_tnew. Copy asegs_pri into
+ asegs_told, and remove everything except MData and MText, so as
+ to generate something we can sanely compare with asegs_tnew.
+ Walk asegs_told and asegs_tnew together, writing the differences
+ to 'directives', and modifying asegs_pri accordingly. */
+ parse_procselfmap( &asegs_tnew );
+ copy_asegments_d_s( &asegs_told, &asegs_pri );
+ knockout_non_module_segs( &asegs_told );
+
+ *ndirectives = 0;
+
+ /* MODIFY_PRI(_dir, _asegs, _ixt, _acquire): given the index _ixt
+ of an MText segment in _asegs, locate its MData sibling (or
+ fabricate an empty dummy if it has none), fill in directive
+ _dir, and then add the pair to / delete the pair from asegs_pri
+ according to _acquire. */
+# define MODIFY_PRI(_dir, _asegs, _ixt, _acquire) \
+ do { \
+ Int _ixd; \
+ AixSegment *_segt, *_segd; \
+ AixSegment _segd_dummy; \
+ aspacem_assert(_ixt >= 0 && _ixt < _asegs.used); \
+ _segt = &_asegs.seg[_ixt]; \
+ aspacem_assert(_segt->kind == ASkMText); \
+ if (_segt->sibling) { \
+ _ixd = find_asegment_idx( &_asegs, _segt->sibling ); \
+ _segd = &_asegs.seg[_ixd]; \
+ aspacem_assert(_segd->kind == ASkMData); \
+ aspacem_assert(_segt->sibling == _segd->start); \
+ } else { \
+ init_AixSegment( &_segd_dummy ); \
+ _segd_dummy.kind = ASkMData; \
+ _segd_dummy.start = 1; \
+ _segd_dummy.end = 0; \
+ _segd = &_segd_dummy; \
+ } \
+ if (_segd != &_segd_dummy) \
+ aspacem_assert(_segd->sibling == _segt->start); \
+ \
+ (_dir).code_start = (_segt)->start; \
+ (_dir).code_len = (_segt)->end - (_segt)->start + 1; \
+ (_dir).data_start = (_segd)->start; \
+ (_dir).data_len = (_segd)->end - (_segd)->start + 1; \
+ (_dir).file_name = (_segt)->fname; \
+ (_dir).mem_name = (_segt)->mname; \
+ (_dir).is_mainexe = (_acquire) ? (_segt)->isMainExe : False; \
+ (_dir).acquire = (_acquire); \
+ \
+ if (_acquire) { \
+ add_pri_text_and_data_segs( _segt, _segd ); \
+ } else { \
+ del_pri_text_and_data_segs( _segt, _segd ); \
+ } \
+ } while (0)
+
+ ixold = 0; /* indexes asegs_told */
+ ixnew = 0; /* indexes asegs_tnew */
+
+ /* Merge-walk the two (address-ordered) segment arrays. */
+ while (True) {
+
+ aspacem_assert(ixold >= 0 && ixold < asegs_told.used);
+ aspacem_assert(ixnew >= 0 && ixnew < asegs_tnew.used);
+
+ /* Advance ixold and ixnew to the next MText in their
+ respective arrays. */
+ while (ixold < asegs_told.used
+ && asegs_told.seg[ixold].kind != ASkMText) {
+ aspacem_assert(asegs_told.seg[ixold].kind == ASkFree
+ || asegs_told.seg[ixold].kind == ASkMData);
+ ixold++;
+ }
+ while (ixnew < asegs_tnew.used
+ && asegs_tnew.seg[ixnew].kind != ASkMText) {
+ aspacem_assert(asegs_tnew.seg[ixnew].kind == ASkFree
+ || asegs_tnew.seg[ixnew].kind == ASkMData);
+ ixnew++;
+ }
+
+ aspacem_assert(ixold >= 0 && ixold <= asegs_told.used);
+ aspacem_assert(ixnew >= 0 && ixnew <= asegs_tnew.used);
+
+ done_old = ixold == asegs_told.used;
+ done_new = ixnew == asegs_tnew.used;
+
+ if (done_old && done_new)
+ goto both_done;
+ if (done_old && !done_new)
+ goto finishup_new;
+ if (done_new && !done_old)
+ goto finishup_old;
+
+ olds = &asegs_told.seg[ixold];
+ news = &asegs_tnew.seg[ixnew];
+
+ aspacem_assert(olds->kind == ASkMText);
+ aspacem_assert(news->kind == ASkMText);
+
+ if (0) {
+ show_AixSegment(0,ixold,&asegs_told.seg[ixold]);
+ show_AixSegment(0,ixnew,&asegs_tnew.seg[ixnew]);
+ VG_(debugLog)(0, "aspacem", "\n");
+ }
+
+ /* Here, if olds->start < news->start, then the old sequence has
+ an entry which the new one doesn't, so a module has been
+ unloaded. If news->start < olds->start then the new sequence
+ has a module the old one doesn't, so a module has been
+ loaded. If news->start == olds->start then the module is
+ unchanged. Except, we should check a bit more carefully in
+ the zero case. */
+ if (olds->start == news->start) {
+ if (olds->start == news->start
+ && olds->end == news->end
+ && olds->fname == news->fname
+ && olds->mname == news->mname
+ && olds->sibling == news->sibling
+ && olds->isMainExe == news->isMainExe) {
+ /* really identical, do nothing */
+ } else {
+ /* Dubious; mark it as an unload of old and load of
+ new. */
+ MODIFY_PRI(directives[*ndirectives], asegs_told, ixold, False);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ MODIFY_PRI(directives[*ndirectives], asegs_tnew, ixnew, True);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ }
+ ixold++;
+ ixnew++;
+ continue;
+ }
+
+ if (olds->start < news->start) {
+ /* discard olds */
+ MODIFY_PRI(directives[*ndirectives], asegs_told, ixold, False);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ ixold++;
+ continue;
+ }
+
+ if (news->start < olds->start) {
+ /* acquire news */
+ MODIFY_PRI(directives[*ndirectives], asegs_tnew, ixnew, True);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ ixnew++;
+ continue;
+ }
+ /* NOTREACHED */
+ aspacem_assert(0);
+ }
+
+ /* Old array exhausted: everything remaining in the new array is a
+ freshly loaded module. */
+ finishup_new:
+ olds = NULL;
+ aspacem_assert(ixold == asegs_told.used);
+ aspacem_assert(ixnew < asegs_tnew.used);
+ while (ixnew < asegs_tnew.used) {
+ news = &asegs_tnew.seg[ixnew];
+ aspacem_assert(news->kind == ASkMText || news->kind == ASkMData
+ || news->kind == ASkFree);
+ if (news->kind == ASkMText) {
+ MODIFY_PRI(directives[*ndirectives], asegs_tnew, ixnew, True);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ }
+ ixnew++;
+ }
+ goto both_done;
+
+ /* New array exhausted: everything remaining in the old array is a
+ module that has been unloaded. */
+ finishup_old:
+ news = NULL;
+ aspacem_assert(ixnew == asegs_tnew.used);
+ aspacem_assert(ixold < asegs_told.used);
+ while (ixold < asegs_told.used) {
+ olds = &asegs_told.seg[ixold];
+ aspacem_assert(olds->kind == ASkMText || olds->kind == ASkMData
+ || olds->kind == ASkFree);
+ if (olds->kind == ASkMText) {
+ MODIFY_PRI(directives[*ndirectives], asegs_told, ixold, False);
+ (*ndirectives)++;
+ aspacem_assert(*ndirectives <= 2 * VG_N_ASEGMENTS);
+ }
+ ixold++;
+ }
+ goto both_done;
+
+ both_done:
+ aspacem_assert(ixold == asegs_told.used);
+ aspacem_assert(ixnew == asegs_tnew.used);
+
+ /* Scratch arrays are dead; reset them for the next call. */
+ asegs_tnew.used = 0;
+ asegs_told.used = 0;
+
+ aspacem_assert( sane_AixSegments(&asegs_pri) );
+
+# undef MODIFY_PRI
+}
+
+
+/* Set the initial stack segment. Contains kludgery. Also take the
+ opportunity to create fake segs for the millicode areas. */
+
+void VG_(am_aix5_set_initial_client_sp)( Addr sp )
+{
+ static Bool done = False; /* one-shot guard: may only run once */
+ AixSegment seg;
+ Word n_fake_stack_pages;
+ Word m1 = 1048576; /* 1 MiB, used when sizing the stack below */
+
+ aspacem_assert(!done);
+ done = True;
+
+ /* We are given the initial client SP (that of the root thread).
+ Already on the stack are argv and env. How far up does it
+ extend? We assume to the next 64k boundary. How far down does
+ it extend? We assume N_FAKE_STACK_PAGES small pages - by
+ default 16M. Establish those limits and add an AnonC rwx
+ segment. */
+
+ /* The 64k boundary is "justified" as follows. On 32-bit AIX 5.3,
+ a typical initial SP is 0x2FF22xxx, but the accessible (rw) area
+ beyond that extends up to 0x2FF2FFFF - the next 64k boundary.
+ In 64-bit mode, a typical initial SP might be
+ 0xFFF'FFFF'FFFF'E920, and the accessible area extends to
+ 0xFFF'FFFF'FFFF'FFFF. So in both cases, (64k roundup of sp) - 1
+ gives the end of the accessible area. */
+ VG_(debugLog)(1,"aspacem", "aix5_set_initial_client_sp( %p )\n",
+ (void*)sp);
+
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonC;
+ seg.hasR = seg.hasW = seg.hasX = True;
+
+ if (sizeof(void*) == 4
+ && ((sp & 0xFFFF0000) == 0x2FF20000
+ || (sp & 0xFFFF0000) == 0x2FF10000)) {
+ /* Gaaah. Special-case 32-bit mode. */
+ seg.end = 0x2FF2FFFF;
+ } else {
+ seg.end = AM_64K_ROUNDUP(sp) - 1;
+ }
+
+ /* Size the fake stack: at least N_FAKE_STACK_PAGES_MIN pages, or
+ enough to cover --main-stacksize plus 1MiB of slop. */
+ n_fake_stack_pages = N_FAKE_STACK_PAGES_MIN;
+ if (VG_(clo_main_stacksize) > 0
+ && ((m1+VG_(clo_main_stacksize)) / VKI_PAGE_SIZE) > n_fake_stack_pages) {
+ n_fake_stack_pages = (m1+VG_(clo_main_stacksize)) / VKI_PAGE_SIZE;
+ }
+ if (n_fake_stack_pages > N_FAKE_STACK_PAGES_MAX) {
+ /* Allocation of the stack failed. We have to stop. */
+ VG_(debugLog)(
+ 0, "aspacem",
+ "valgrind: "
+ "I failed to allocate space for the application's stack.\n");
+ VG_(debugLog)(
+ 0, "aspacem",
+ "valgrind: "
+ "This may be the result of a very large --max-stackframe=\n");
+ VG_(debugLog)(
+ 0, "aspacem",
+ "valgrind: "
+ "setting. Cannot continue. Sorry.\n\n");
+ ML_(am_exit)(0);
+ }
+
+ seg.start = seg.end+1 - n_fake_stack_pages * VKI_PAGE_SIZE;
+
+ VG_(debugLog)(1,"aspacem", "aix5_set_initial_client_sp: stack seg:\n");
+ show_AixSegment(1,0, &seg);
+ add_asegment( &asegs_pri, &seg );
+
+ /* Fake r-x segment for the first millicode ("magic pages") area. */
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonC;
+ seg.hasR = seg.hasX = True;
+ seg.start = MAGIC_PAGES_1_BASE;
+ seg.end = MAGIC_PAGES_1_BASE + MAGIC_PAGES_1_SIZE - 1;
+ VG_(debugLog)(1,"aspacem", "am_aix5_set_initial_client_sp: FAKE1 seg:\n");
+ show_AixSegment(1,0, &seg);
+ add_asegment( &asegs_pri, &seg );
+
+ /* Likewise for the second millicode area. */
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonC;
+ seg.hasR = seg.hasX = True;
+ seg.start = MAGIC_PAGES_2_BASE;
+ seg.end = MAGIC_PAGES_2_BASE + MAGIC_PAGES_2_SIZE - 1;
+ VG_(debugLog)(1,"aspacem", "am_aix5_set_initial_client_sp: FAKE2 seg:\n");
+ show_AixSegment(1,0, &seg);
+ add_asegment( &asegs_pri, &seg );
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Getting segment-starts. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Print out the segment array (debugging only!). Delegates to
+ show_AixSegments on the primary (live) segment array. */
+void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
+{
+ show_AixSegments( logLevel, who, &asegs_pri );
+}
+
+/* Get the filename corresponding to this segment, if known and if it
+ has one. The returned name's storage cannot be assumed to be
+ persistent, so the caller should immediately copy the name
+ elsewhere. On AIX5, we don't know what this is (in general)
+ so just return NULL. */
+HChar* VG_(am_get_filename)( NSegment const* seg )
+{
+ /* Filenames here live in AixSegment.fname, not in the NSegment,
+ so there is nothing useful to hand back. */
+ return NULL;
+}
+
+/* Collect up the start addresses of all non-free, non-resvn segments.
+ The interface is a bit strange in order to avoid potential
+ segment-creation races caused by dynamic allocation of the result
+ buffer *starts.
+
+ The function first computes how many entries in the result
+ buffer *starts will be needed. If this number <= nStarts,
+ they are placed in starts[0..], and the number is returned.
+ If nStarts is not large enough, nothing is written to
+ starts[0..], and the negation of the size is returned.
+
+ Correct use of this function may mean calling it multiple times in
+ order to establish a suitably-sized buffer. */
+
+Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
+{
+ Int i, j, nSegs;
+
+ /* don't pass dumbass arguments */
+ aspacem_assert(nStarts >= 0);
+
+ /* Pass 1: count the segments we will report (everything except
+ free space and preallocated-but-unused space). */
+ nSegs = 0;
+ for (i = 0; i < asegs_pri.used; i++) {
+ if (asegs_pri.seg[i].kind == ASkFree
+ || asegs_pri.seg[i].kind == ASkPreAlloc)
+ continue;
+ nSegs++;
+ }
+
+ if (nSegs > nStarts) {
+ /* The buffer isn't big enough. Tell the caller how big it needs
+ to be. */
+ return -nSegs;
+ }
+
+ /* There's enough space. So write into the result buffer. */
+ aspacem_assert(nSegs <= nStarts);
+
+ /* Pass 2: same filter as pass 1, but record the start addresses. */
+ j = 0;
+ for (i = 0; i < asegs_pri.used; i++) {
+ if (asegs_pri.seg[i].kind == ASkFree
+ || asegs_pri.seg[i].kind == ASkPreAlloc)
+ continue;
+ starts[j++] = asegs_pri.seg[i].start;
+ }
+
+ aspacem_assert(j == nSegs); /* this should not fail */
+ return nSegs;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Sanity checking and preening of the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Kernel-vs-segment-array sync check: a no-op on AIX5. Always
+ reports "not in sync" (False), which callers treat as a dummy. */
+Bool VG_(am_do_sync_check) ( const HChar* fn,
+ const HChar* file, Int line )
+{
+ /* There's nothing we can do here; just return a dummy value. */
+ return False; /* placate gcc */
+}
+
+/* Hook to allow sanity checks to be done from aspacemgr-common.c.
+ Asserts that the primary segment array is internally consistent. */
+void ML_(am_do_sanity_check)( void )
+{
+ Bool ok = sane_AixSegments( &asegs_pri );
+ aspacem_assert(ok);
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Finding segments. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Finds the segment containing 'a'. Only returns file/anon/resvn
+ segments. On AIX5 this is pretty bogus; we fake up an entry as
+ best we can by snooping round for useful information in
+ asegs_pri. */
+
+NSegment const* VG_(am_find_nsegment) ( Addr a )
+{
+ Int i;
+ AixSegment* aseg;
+ /* Static result buffer: the returned pointer is only valid until
+ the next call. */
+ static NSegment bogus;
+
+ /* Fill in default info. */
+ bogus.kind = SkAnonC;
+ bogus.start = 0;
+ bogus.end = 0;
+ bogus.smode = SmFixed;
+ bogus.dev = 0;
+ bogus.ino = 0;
+ bogus.mode = 0;
+ bogus.offset = 0;
+ bogus.fnIdx = -1;
+ bogus.hasR = bogus.hasW = bogus.hasX = False;
+ bogus.hasT = False;
+ bogus.isCH = False;
+ bogus.mark = False;
+
+ /* Go look for it in the segment table. */
+ i = find_asegment_idx( &asegs_pri, a );
+ /* BUGFIX: 'i' indexes asegs_pri.seg[] immediately below, so it
+ must be strictly less than .used; the previous bound of
+ '<= asegs_pri.used' would let an out-of-bounds index through. */
+ aspacem_assert(i >= 0 && i < asegs_pri.used);
+
+ aseg = &asegs_pri.seg[i];
+ if (aseg->kind == ASkFree || aseg->kind == ASkPreAlloc)
+ return NULL;
+
+ bogus.start = aseg->start;
+ bogus.end = aseg->end;
+
+ /* Refine: translate the AixSegment kind/permissions into the
+ nearest NSegment equivalent. */
+ switch (aseg->kind) {
+ case ASkMText:
+ bogus.kind = SkAnonC; /* hmm, pretty darn bogus */
+ bogus.hasR = bogus.hasX = True;
+ break;
+ case ASkMData:
+ bogus.kind = SkAnonC; /* hmm, pretty darn bogus */
+ bogus.hasR = bogus.hasW = True;
+ break;
+ case ASkShmemC:
+ bogus.kind = SkShmC;
+ bogus.hasR = aseg->hasR;
+ bogus.hasW = aseg->hasW;
+ bogus.hasX = aseg->hasX;
+ break;
+ case ASkAnonC:
+ bogus.kind = SkAnonC;
+ bogus.hasR = aseg->hasR;
+ bogus.hasW = aseg->hasW;
+ bogus.hasX = aseg->hasX;
+ bogus.isCH = aseg->isCH;
+ break;
+ case ASkAnonV:
+ bogus.kind = SkAnonV;
+ bogus.hasR = aseg->hasR;
+ bogus.hasW = aseg->hasW;
+ bogus.hasX = aseg->hasX;
+ break;
+ case ASkFileV:
+ bogus.kind = SkFileV;
+ bogus.hasR = aseg->hasR;
+ bogus.hasW = aseg->hasW;
+ bogus.hasX = aseg->hasX;
+ bogus.offset = aseg->offset;
+ break;
+ default:
+ aspacem_assert(0);
+ }
+
+ return &bogus;
+}
+
+
+/* Find the next segment along from 'here', if it is a file/anon/resvn
+ segment. Not implemented on AIX5: barfs if ever called. */
+NSegment const* VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
+{
+ ML_(am_barf)("unimplemented: VG_(am_next_nsegment)");
+ return NULL; /* placate gcc */
+}
+
+
+/* Trivial fn: return the total amount of space in anonymous mappings,
+ both for V (ASkAnonV) and the client (ASkAnonC). Is used for
+ printing stats in out-of-memory messages. */
+ULong VG_(am_get_anonsize_total)( void )
+{
+ ULong sum = 0;
+ Int ix;
+ for (ix = 0; ix < asegs_pri.used; ix++) {
+ const AixSegment* s = &asegs_pri.seg[ix];
+ if (s->kind != ASkAnonC && s->kind != ASkAnonV)
+ continue;
+ /* widen before arithmetic so huge segments can't wrap */
+ sum += (ULong)s->end - (ULong)s->start + 1ULL;
+ }
+ return sum;
+}
+
+
+/* Test if a piece of memory is addressable by the client with at
+ least the "prot" protection permissions by examining the underlying
+ segments. Note: asserts that the whole range lies within one
+ segment (fake->start .. fake->end). */
+Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
+ UInt prot )
+{
+ NSegment const * const fake = VG_(am_find_nsegment)(start);
+ if (!fake)
+ return False;
+ aspacem_assert(fake->start <= start);
+ aspacem_assert(start + len - 1 <= fake->end);
+ /* V-only segments are never valid for the client. */
+ if (fake->kind == SkAnonV || fake->kind == SkFileV)
+ return False;
+ /* Every requested permission bit must be present. */
+ if ((prot & VKI_PROT_READ) && !fake->hasR)
+ return False;
+ if ((prot & VKI_PROT_WRITE) && !fake->hasW)
+ return False;
+ if ((prot & VKI_PROT_EXEC) && !fake->hasX)
+ return False;
+ return True;
+}
+
+/* Variant of VG_(am_is_valid_for_client) which allows free areas to
+ be considered part of the client's addressable space. It also
+ considers reservations to be allowable, since from the client's
+ point of view they don't exist. Not implemented on AIX5: barfs
+ if ever called. */
+Bool VG_(am_is_valid_for_client_or_free_or_resvn)
+ ( Addr start, SizeT len, UInt prot )
+{
+ ML_(am_barf)("unimplemented: "
+ "VG_(am_is_valid_for_client_or_free_or_resvn)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Startup, including reading /proc/self/maps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Initialise the address space manager, setting up the initial
+ segment list, and reading /proc/self/maps into it. This must
+ be called before any other function.
+
+ Takes a pointer to the SP at the time V gained control. This is
+ taken to be the highest usable address (more or less). Based on
+ that (and general consultation of tea leaves, etc) return a
+ suggested end address for the client's stack. */
+
+Addr VG_(am_startup) ( Addr sp_at_startup )
+{
+ /* The whole module assumes Word/Addr/SizeT/SSizeT are all
+ pointer-sized; check that up front. */
+ aspacem_assert(sizeof(Word) == sizeof(void*));
+ aspacem_assert(sizeof(Addr) == sizeof(void*));
+ aspacem_assert(sizeof(SizeT) == sizeof(void*));
+ aspacem_assert(sizeof(SSizeT) == sizeof(void*));
+
+ asegs_tnew.used = 0;
+ asegs_told.used = 0;
+
+ /* NOTE(review): this store looks redundant -- init_AixSegments is
+ presumably responsible for establishing .used itself; confirm
+ and drop if so. */
+ asegs_pri.used = 1;
+ init_AixSegments( &asegs_pri );
+ aspacem_assert( sane_AixSegments(&asegs_pri) );
+
+ if (0)
+ VG_(am_show_nsegments)(0,"AFTER VG_(am_startup)");
+
+ /* We do not make an initial read of /proc/../map since doing so
+ would leave us without a way to communicate the results to a
+ caller. Hence we expect that the caller (m_main) will call
+ VG_(am_aix5_reread_procmap) soon after this call so as to get
+ the initial code/data segments recorded. */
+
+ /* Return value is irrelevant since we don't lay out the
+ client's stack; it is already done. */
+ return 0;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Preallocation (acquiring space from sbrk). ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Raw sbrk(delta) syscall, with no notification of the segment
+ array. Returns the kernel's result directly; the caller must
+ inspect sres.isError. */
+static
+SysRes local_do_sbrk_NO_NOTIFY( Word delta )
+{
+ SysRes res;
+ /* Barf early if the sbrk syscall number was never discovered. */
+ aspacem_assert(__NR_AIX5_sbrk != __NR_AIX5_UNKNOWN);
+ res = VG_(do_syscall1)(__NR_AIX5_sbrk, (UWord)delta);
+ /* kernel produces (-1, VKI_ENOMEM) on failure. I think that's
+ ok. */
+ return res;
+}
+
+
+/* Find the ix of a prealloc section containing at least req_sz bytes,
+ or -1 if not found. Uses best-fit (smallest section that still
+ satisfies the request). */
+
+static Int find_prealloc_idx ( SizeT req_sz )
+{
+ SizeT best_sz, this_sz;
+ Int best_ix, i;
+ aspacem_assert(sizeof(SizeT) == sizeof(Addr));
+ aspacem_assert(req_sz > 0);
+ aspacem_assert(AM_IS_4K_ALIGNED(req_sz));
+
+ /* Addr_MAX sentinel: any real section will beat it. */
+ best_sz = Addr_MAX;
+ best_ix = -1;
+
+ for (i = 0; i < asegs_pri.used; i++) {
+ AixSegment* s = &asegs_pri.seg[i];
+ if (s->kind != ASkPreAlloc)
+ continue;
+ this_sz
+ = s->end + 1 - s->start;
+ aspacem_assert(this_sz > 0);
+ aspacem_assert(AM_IS_4K_ALIGNED(this_sz));
+ /* keep the smallest section that is still big enough */
+ if (this_sz >= req_sz && this_sz < best_sz) {
+ best_sz = this_sz;
+ best_ix = i;
+ }
+ }
+
+ return best_ix;
+}
+
+
+/* Create a new prealloc section containing req_sz bytes, by growing
+ the brk. Returns False if failed, True on success. On success a
+ new ASkPreAlloc segment is recorded in asegs_pri. */
+
+static Bool new_prealloc ( SizeT req_sz )
+{
+ SysRes sres;
+ AixSegment seg;
+ Addr start;
+ SSizeT delta;
+ HChar* why = NULL;
+
+ aspacem_assert(req_sz > 0);
+ aspacem_assert(AM_IS_4K_ALIGNED(req_sz));
+
+ /* m_syswrap may have decided that it's not currently safe to allow
+ allocations from sbrk-world. If so, we have to fail. */
+ /* NOTE(review): the '0 &&' permanently disables this check, so
+ VG_(am_aix5_sbrk_allowed) is never consulted here -- confirm
+ that is intentional. */
+ if (0 && !VG_(am_aix5_sbrk_allowed)) {
+ why = "sbrk disallowed";
+ goto fail;
+ }
+
+ /* Get the current limit. */
+ sres = local_do_sbrk_NO_NOTIFY(0);
+ if (sres.isError) {
+ why = "initial sbrk failed";
+ goto fail;
+ }
+
+ /* Get it page aligned */
+ delta = AM_4K_ROUNDUP(sres.res) - sres.res;
+ aspacem_assert(delta >= 0 && delta < AM_4K_PAGESZ);
+ if (delta > 0) {
+ sres = local_do_sbrk_NO_NOTIFY(delta);
+ if (sres.isError) {
+ why = "aligning sbrk failed";
+ goto fail;
+ }
+ }
+
+ /* Now the brk is aligned. Try to acquire the block. */
+ sres = local_do_sbrk_NO_NOTIFY(0);
+ if (sres.isError)
+ return False; /* silent failure: no 'why' logged on this path */
+ start = sres.res;
+ aspacem_assert( AM_IS_4K_ALIGNED( start ));
+
+ sres = local_do_sbrk_NO_NOTIFY( req_sz );
+ if (sres.isError) {
+ why = "main sbrk failed";
+ goto fail;
+ }
+
+ /* If this fails, the kernel is acting strange. */
+ aspacem_assert( sres.res == start );
+
+ /* Record the freshly acquired block as a prealloc section. */
+ init_AixSegment( &seg );
+ seg.start = start;
+ seg.end = start + req_sz - 1;
+ seg.kind = ASkPreAlloc;
+ seg.hasR = seg.hasW = seg.hasX = True; /* presumably */
+ add_asegment( &asegs_pri, &seg );
+
+ VG_(debugLog)(
+ 1, "aspacem", "new_prealloc: SUCCESS at 0x%llx size %lld\n",
+ (ULong)start, (ULong)req_sz
+ );
+ return True;
+
+ fail:
+ VG_(debugLog)(1, "aspacem", "new_prealloc: FAILED: %s\n", why);
+ return False;
+}
+
+
+/* Find the ix of a prealloc section capable of holding a block of
+ size req_sz. If none exists, try to create one first. Returns -1
+ on failure. */
+
+static Int find_or_create_prealloc_idx ( SizeT req_sz )
+{
+ Int ix;
+ SizeT req_szX;
+ Bool alloc_ok;
+
+ if (0)
+ VG_(debugLog)(0, "zz", " find_or_create_prealloc_idx ( %lu )\n",
+ req_sz);
+
+ aspacem_assert(sizeof(SizeT) == sizeof(Addr));
+ aspacem_assert(req_sz > 0);
+ aspacem_assert(AM_IS_4K_ALIGNED(req_sz));
+
+ /* Fast path: an existing section already fits. */
+ ix = find_prealloc_idx ( req_sz );
+ if (ix >= 0 && ix < asegs_pri.used)
+ return ix;
+
+ /* Not found. We'll have to allocate one. Allocate some extra at
+ the same time, so as to give a reservoir from which to satisfy
+ future requests. */
+ aspacem_assert(ix == -1);
+
+ req_szX = req_sz + AM_PREALLOC_EXTRA;
+ aspacem_assert(req_szX > 0);
+ aspacem_assert(AM_IS_4K_ALIGNED(req_szX));
+
+ alloc_ok = new_prealloc( req_szX );
+ if (!alloc_ok)
+ return -1; /* failed */
+
+ /* We should now be able to find it in the segment table. */
+ ix = find_prealloc_idx( req_sz );
+ aspacem_assert(ix >= 0 && ix < asegs_pri.used);
+ return ix;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The core query-notify mechanism. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Query aspacem to ask where a mapping should go. Not implemented
+ on AIX5: barfs if ever called. */
+
+Addr VG_(am_get_advisory) ( MapRequest* req,
+ Bool forClient,
+ /*OUT*/Bool* ok )
+{
+ ML_(am_barf)("unimplemented: VG_(am_get_advisory)");
+ /*NOTREACHED*/
+ return 0; /* placate gcc -Wall */
+}
+
+
+/* Convenience wrapper for VG_(am_get_advisory) for client floating or
+ fixed requests. If start is zero, a floating request is issued; if
+ nonzero, a fixed request at that address is issued. Same comments
+ about return values apply. Not implemented on AIX5: barfs if ever
+ called. */
+
+Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
+ /*OUT*/Bool* ok )
+{
+ ML_(am_barf)("unimplemented: VG_(am_get_advisory_client_simple)");
+ /*NOTREACHED*/
+ return 0; /* placate gcc -Wall */
+}
+
+
+/* Notifies aspacem that the client completed an mmap successfully.
+ The segment array is updated accordingly. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+
+Bool
+VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
+ Int fd, Off64T offset )
+{
+ AixSegment seg;
+ Bool needDiscard;
+
+ /* Zero-length mappings change nothing. */
+ if (len == 0)
+ return False;
+
+ /* Discard is needed if any of the just-trashed range had T. */
+ needDiscard = True; /* conservative but safe */
+
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonC; /* XXX bogus: could be a file */
+ seg.start = a;
+ seg.end = a + len - 1;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+
+ if (0)
+ VG_(debugLog)(0,"aspacem","notify mmap ( %p, %ld, %ld, %ld )\n",
+ (void*)a, len, (UWord)prot, (UWord)flags);
+
+ add_asegment( &asegs_pri, &seg );
+ AM_SANITY_CHECK("am_notify_client_mmap");
+ return needDiscard;
+}
+
+
+/* Notifies aspacem that the client completed a shmat successfully.
+ The segment array is updated accordingly. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+
+Bool
+VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
+{
+ AixSegment seg;
+ /* Robustness: ignore degenerate requests, as the other notify
+ functions do; otherwise seg.end would underflow to a - 1. */
+ if (len == 0)
+ return False;
+ init_AixSegment( &seg );
+ seg.kind = ASkShmemC;
+ seg.start = a;
+ seg.end = seg.start + len - 1;
+ /* toBool, for consistency with VG_(am_notify_client_mmap). */
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ add_asegment( &asegs_pri, &seg );
+ AM_SANITY_CHECK("am_notify_client_shmat");
+ if (0) VG_(am_show_nsegments)(0, "after shmat");
+ return True; /* be paranoid */
+}
+
+
+/* Notifies aspacem that an mprotect was completed successfully. The
+ segment array is updated accordingly. Note, as with
+ VG_(am_notify_munmap), it is not the job of this function to reject
+ stupid mprotects, for example the client doing mprotect of
+ non-client areas. Such requests should be intercepted earlier, by
+ the syscall wrapper for mprotect. This function merely records
+ whatever it is told. If the returned Bool is True, the caller
+ should immediately discard translations from the specified address
+ range. */
+
+Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
+{
+ Int i, iLo, iHi;
+ Bool newR, newW, newX, needDiscard;
+
+ if (len == 0)
+ return False;
+
+ newR = toBool(prot & VKI_PROT_READ);
+ newW = toBool(prot & VKI_PROT_WRITE);
+ newX = toBool(prot & VKI_PROT_EXEC);
+
+ /* Discard is needed if we're dumping X permission */
+ needDiscard = True; /* conservative but correct */
+
+ /* Split segments at the range boundaries so permissions can be
+ applied to exactly [start, start+len-1]. */
+ split_asegments_lo_and_hi( &asegs_pri, start, start+len-1, &iLo, &iHi );
+
+ /* NOTE(review): split_asegments_lo_and_hi already returned iLo and
+ iHi above; these lookups recompute them -- presumably harmless,
+ but verify whether they are needed at all. */
+ iLo = find_asegment_idx(&asegs_pri, start);
+ iHi = find_asegment_idx(&asegs_pri, start + len - 1);
+
+ for (i = iLo; i <= iHi; i++) {
+ aspacem_assert(i >= 0 && i < asegs_pri.used);
+ /* Apply the permissions to all relevant segments. */
+ if (asegs_pri.seg[i].kind != ASkFree) {
+ asegs_pri.seg[i].hasR = newR;
+ asegs_pri.seg[i].hasW = newW;
+ asegs_pri.seg[i].hasX = newX;
+ aspacem_assert(sane_AixSegment(&asegs_pri.seg[i]));
+ }
+ }
+ if (0)
+ VG_(debugLog)(0,"aspacem","notify mprotect ( %p, %ld, %ld )\n",
+ (void*)start, len, (UWord)prot);
+ /* Changing permissions could have made previously un-mergable
+ segments mergeable. Therefore have to re-preen them. */
+ preen_asegments(&asegs_pri);
+ AM_SANITY_CHECK("am_notify_mprotect");
+ return needDiscard;
+}
+
+
+/* Notifies aspacem that an munmap completed successfully. The
+ segment array is updated accordingly: the unmapped range is simply
+ overwritten with an ASkFree segment. We merely record the given
+ info, and don't check it for sensibleness. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+
+Bool VG_(am_notify_munmap)( Addr start, SizeT len )
+{
+ AixSegment gap;
+
+ /* Zero-length unmaps change nothing. */
+ if (len == 0)
+ return False;
+
+ init_AixSegment( &gap );
+ gap.kind = ASkFree;
+ gap.start = start;
+ gap.end = start + len - 1;
+ add_asegment( &asegs_pri, &gap );
+ AM_SANITY_CHECK("am_notify_munmap");
+
+ /* conservative but safe: always request a translation discard */
+ return True;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Handling mappings which do not arise directly from the ---*/
+/*--- simulation of the client. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* --- --- --- map, unmap, protect --- --- --- */
+
+/* Map a file at a fixed address for the client, and update the
+ segment array accordingly. Not implemented on AIX5: barfs if
+ ever called. */
+
+SysRes VG_(am_mmap_file_fixed_client)
+ ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
+{
+ SysRes r = {0,0};
+ ML_(am_barf)("unimplemented: VG_(am_mmap_file_fixed_client)");
+ /*NOTREACHED*/
+ return r;
+}
+
+
+/* Map anonymously at a fixed address for the client, and update
+ the segment array accordingly. Not implemented on AIX5: barfs
+ if ever called. */
+
+SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
+{
+ SysRes r = {0,0};
+ ML_(am_barf)("unimplemented: VG_(am_mmap_anon_fixed_client)");
+ /*NOTREACHED*/
+ return r;
+}
+
+
+/* Map anonymously at an unconstrained address for the client, and
+ update the segment array accordingly. */
+
+SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
+{
+ SysRes sres;
+ AixSegment seg;
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* AIX seems to demand fd == -1 in anonymous mappings. hence: */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ 0, length,
+ prot,
+ VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ -1, 0
+ );
+
+ if (!sres.isError) {
+ /* Record the new mapping as a client anon segment. */
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonC;
+ seg.start = sres.res;
+ seg.end = seg.start + length - 1;
+ seg.hasR = toBool((prot & VKI_PROT_READ) > 0);
+ seg.hasW = toBool((prot & VKI_PROT_WRITE) > 0);
+ seg.hasX = toBool((prot & VKI_PROT_EXEC) > 0);
+ seg.fromP = False; /* came from mmap, not from a prealloc area */
+ add_asegment( &asegs_pri, &seg );
+ VG_(debugLog)(2, "aspacem", "new AnonC from mmap, size %lu\n",
+ length );
+ }
+
+ return sres;
+}
+
+
+/* Similarly, acquire new address space for the client but with
+ considerable restrictions on what can be done with it: (1) the
+ actual protections may exceed those stated in 'prot', (2) the
+ area's protections cannot be later changed using any form of
+ mprotect, and (3) the area cannot be freed using any form of
+ munmap. On Linux this behaves the same as
+ VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
+ by using sbrk, so as to make use of large pages on AIX. */
+
+SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
+{
+ Int ix;
+ SysRes sres;
+ AixSegment seg;
+ SizeT lenX = AM_4K_ROUNDUP(length); /* prealloc carve-outs are 4K-aligned */
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* First see if we can get space from sbrk-world. */
+ ix = find_or_create_prealloc_idx ( lenX );
+ if (ix >= 0 && ix < asegs_pri.used) {
+ /* Carve the block out of the front of the prealloc section. */
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonC;
+ seg.start = asegs_pri.seg[ix].start;
+ seg.end = seg.start + lenX - 1;
+ seg.hasR = toBool((prot & VKI_PROT_READ) > 0);
+ seg.hasW = toBool((prot & VKI_PROT_WRITE) > 0);
+ seg.hasX = toBool((prot & VKI_PROT_EXEC) > 0);
+ seg.fromP = True;
+ add_asegment( &asegs_pri, &seg );
+ sres = VG_(mk_SysRes_Success)( seg.start );
+ VG_(debugLog)(2, "aspacem", "new AnonC from prealloc, size %lu\n",
+ length );
+ return sres;
+ }
+
+ /* That didn't work out. Try mmap-world instead. */
+ aspacem_assert(ix == -1);
+ return VG_(am_mmap_anon_float_client)( length, prot );
+}
+
+
+/* Map anonymously at an unconstrained address for V, and update the
+ segment array accordingly. This is fundamentally how V allocates
+ itself more address space when needed. */
+
+SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
+{
+ SysRes sres;
+ AixSegment seg;
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* AIX seems to demand fd == -1 in anonymous mappings. hence: */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ 0, length,
+ VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
+ VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ -1, 0
+ );
+
+ if (!sres.isError) {
+ /* Record the new mapping as a V-only anon segment (rwx). */
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonV;
+ seg.start = sres.res;
+ seg.end = seg.start + length - 1;
+ seg.hasR = seg.hasW = seg.hasX = True;
+ seg.fromP = False; /* came from mmap, not from a prealloc area */
+ add_asegment( &asegs_pri, &seg );
+ VG_(debugLog)(2, "aspacem", "new AnonV from mmap, size %lu\n",
+ length );
+ }
+
+ return sres;
+}
+
+
+/* Same comments apply as per VG_(am_sbrk_anon_float_client). On
+ Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
+SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT length )
+{
+ Int ix;
+ SysRes sres;
+ AixSegment seg;
+ SizeT lenX = AM_4K_ROUNDUP(length); /* prealloc carve-outs are 4K-aligned */
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* First see if we can get space from sbrk-world. */
+ ix = find_or_create_prealloc_idx ( lenX );
+ if (ix >= 0 && ix < asegs_pri.used) {
+ /* Carve the block out of the front of the prealloc section. */
+ init_AixSegment( &seg );
+ seg.kind = ASkAnonV;
+ seg.start = asegs_pri.seg[ix].start;
+ seg.end = seg.start + lenX - 1;
+ seg.hasR = True;
+ seg.hasW = True;
+ seg.hasX = True;
+ seg.fromP = True;
+ add_asegment( &asegs_pri, &seg );
+ sres = VG_(mk_SysRes_Success)( seg.start );
+ VG_(debugLog)(2, "aspacem", "new AnonV from prealloc, size %lu\n",
+ length );
+ return sres;
+ }
+
+ /* That didn't work out. Try mmap-world instead. */
+ aspacem_assert(ix == -1);
+ return VG_(am_mmap_anon_float_valgrind)( length );
+}
+
+
+/* Really just a wrapper around VG_(am_sbrk_anon_float_valgrind):
+ allocate 'size' bytes of shadow memory for V, or NULL on failure. */
+
+void* VG_(am_shadow_alloc)(SizeT size)
+{
+ SysRes sres = VG_(am_sbrk_anon_float_valgrind)( size );
+ if (sres.isError)
+ return NULL;
+ return (void*)sres.res;
+}
+
+
+/* Map a file at an unconstrained address for V, and update the
+ segment array accordingly. This is used by V for transiently
+ mapping in object files to read their debug info. */
+
+SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
+ Int fd, Off64T offset )
+{
+ SysRes sres;
+
+ /* Not allowable. */
+ if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ 0, length,
+ prot, VKI_MAP_PRIVATE,
+ fd, offset
+ );
+ if (!sres.isError) {
+ AixSegment seg;
+ init_AixSegment( &seg );
+ /* BUGFIX: 'seg' is an AixSegment, so its kind must be the
+ AixSegment constant ASkFileV, not the NSegment constant
+ SkFileV (every other site in this file assigns ASk* kinds,
+ and VG_(am_find_nsegment) switches on ASkFileV). */
+ seg.kind = ASkFileV;
+ seg.start = sres.res;
+ seg.end = seg.start + length - 1;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ seg.fname = add_to_strtab("(FileV-float, unknown name)");
+ add_asegment( &asegs_pri, &seg );
+ aspacem_assert( sane_AixSegments( &asegs_pri ));
+ }
+ return sres;
+}
+
+
+/* Unmap the given address range and update the segment array
+   accordingly.  Fails if the range isn't valid for the client.  If
+   *need_discard is True after a successful return, the caller should
+   immediately discard translations from the specified address
+   range. */
+
+SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
+                              Addr start, SizeT len )
+{
+   SysRes bogus = {0,0};
+   /* Not implemented on AIX5; this aborts the run. */
+   ML_(am_barf)("unimplemented: VG_(am_munmap_client)");
+   /* Never reached; the return only placates the compiler. */
+   return bogus;
+}
+
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for valgrind. */
+/* Also, if the specified range doesn't fall within a single segment,
+ it barfs. This simplifies the implementation; we shouldn't need to
+ deal with anything but the simplest cases. */
+
+SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
+{
+ AixSegment* seg;
+ AixSegment seg2;
+ Addr end;
+ SysRes sres;
+ Int ixS, ixE;
+ Bool debug = False;
+
+ if (debug)
+ VG_(debugLog)(0,"aspacem",
+ "am_munmap_valgrind(%p, %lu)\n", (void*)start, len);
+
+ /* Unmapping zero bytes is a harmless no-op. */
+ if (len == 0)
+ return VG_(mk_SysRes_Success)(0);
+
+ /* We have to be a bit careful here. If the area being unmapped is
+ AnonV which originated from a preallocated area (hence from
+ sbrk-land) then we will have to return it to the preallocated
+ state, rather than unmapping it. */
+ end = start + len - 1;
+ aspacem_assert(start <= end); // else have wraparound?!
+
+ ixS = find_asegment_idx( &asegs_pri, start );
+ ixE = find_asegment_idx( &asegs_pri, end );
+
+ aspacem_assert(ixS >= 0 && ixS < asegs_pri.used);
+ aspacem_assert(ixE >= 0 && ixE < asegs_pri.used);
+
+ /* Preconditions: See comment at start of fn */
+ aspacem_assert(ixS == ixE);
+
+ /* For the segment S denoted by ixS:
+
+ - if S is AnonV from prealloc and S entirely within start .. end,
+ return it to prealloc
+
+ - if S is AnonV not from prealloc and S entirely within start .. end,
+ munmap it
+
+ - if S is FileV and S entirely within start .. end, munmap it
+
+ Otherwise, leave it alone (too complex to handle). In theory
+ this could cause a leak; in practice I don't think it will.
+ */
+ seg = &asegs_pri.seg[ixS];
+
+ if (debug)
+ show_AixSegment( 0, ixS, seg );
+
+ /* Invariants */
+ aspacem_assert(seg->start <= start);
+ aspacem_assert(end <= seg->end);
+
+ if (seg->kind == ASkFileV
+ || (seg->kind == ASkAnonV && (!seg->fromP))) {
+ /* mmap-world segment: really unmap it, then mark the range free
+ in the segment array. */
+ if (debug)
+ VG_(debugLog)(0,"aspacem", "am_munmap_valgrind: !fromP: %p-%p\n",
+ (void*)start, (void*)end);
+ sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
+ if (sres.isError)
+ goto bad;
+ init_AixSegment( &seg2 );
+ seg2.start = start;
+ seg2.end = end;
+ seg2.kind = ASkFree;
+ add_asegment( &asegs_pri, &seg2 );
+ }
+ else
+ if (seg->kind == ASkAnonV && seg->fromP) {
+ /* sbrk-world segment: do not munmap; hand the range back to the
+ preallocated pool instead. */
+ if (debug)
+ VG_(debugLog)(0,"aspacem", "am_munmap_valgrind: fromP: %p-%p\n",
+ (void*)start, (void*)end);
+ init_AixSegment( &seg2 );
+ seg2.start = start;
+ seg2.end = end;
+ seg2.kind = ASkPreAlloc;
+ seg2.hasR = seg2.hasW = seg2.hasX = True;
+ add_asegment( &asegs_pri, &seg2 );
+ }
+ else {
+ /* shouldn't be asked to handle any other cases */
+ aspacem_assert(0);
+ }
+
+ aspacem_assert( sane_AixSegments( &asegs_pri ));
+ return VG_(mk_SysRes_Success)(0);
+
+ bad:
+ aspacem_assert( sane_AixSegments( &asegs_pri ));
+ return VG_(mk_SysRes_Error)(VKI_EINVAL);
+}
+
+
+/* Let (start,len) denote an area within a single Valgrind-owned
+ segment (anon or file). Change the ownership of [start, start+len)
+ to the client instead. Fails if (start,len) does not denote a
+ suitable segment. */
+
+Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
+{
+ /* AIX5: no bookkeeping change is made; success is claimed
+ unconditionally and the arguments are ignored.
+ NOTE(review): confirm callers do not rely on failure detection
+ here. */
+ return True;
+}
+
+
+/* 'seg' must be NULL or have been obtained from VG_(am_find_nsegment),
+   and still valid.  If non-NULL, and if it denotes a SkAnonC
+   (anonymous client mapping) area, set the .isCH (is-client-heap)
+   flag for that area; otherwise do nothing.  (Bizarre interface so
+   that the same code works for both Linux and AIX and does not impose
+   inefficiencies on the Linux version.) */
+/* AIX: presumably this is a faked-up segment our VG_(am_find_segment)
+   came up with.  So we have to find the corresponding AixSegment. */
+
+void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
+{
+   Int ix;
+   if (seg == NULL)
+      return;
+   /* Locate the AixSegment backing this faked-up NSegment. */
+   ix = find_asegment_idx( &asegs_pri, seg->start );
+   aspacem_assert(ix >= 0 && ix < asegs_pri.used );
+   if (asegs_pri.seg[ix].kind != ASkAnonC) {
+      /* Not anonymous client memory; the flag must not be set. */
+      aspacem_assert(asegs_pri.seg[ix].isCH == False);
+      return;
+   }
+   asegs_pri.seg[ix].isCH = True;
+   if (0)
+      VG_(debugLog)(0,"aspacem","set isCH for %p\n", (void*)seg->start );
+}
+
+
+/* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
+ segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
+ segment. */
+/* AIX: we ignore these complexities by conservatively assuming that
+ all segments had translations taken from them. Hence we can safely
+ ignore this. */
+void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
+{
+ /* Deliberately empty: see the comment above -- on AIX every segment
+ is conservatively assumed to carry cached translations. */
+}
+
+
+/* --- --- --- reservations --- --- --- */
+
+/* Create a reservation from START .. START+LENGTH-1, with the given
+ ShrinkMode. When checking whether the reservation can be created,
+ also ensure that at least abs(EXTRA) extra free bytes will remain
+ above (> 0) or below (< 0) the reservation.
+
+ The reservation will only be created if it, plus the extra-zone,
+ falls entirely within a single free segment. The returned Bool
+ indicates whether the creation succeeded. */
+
+Bool VG_(am_create_reservation) ( Addr start, SizeT length,
+ ShrinkMode smode, SSizeT extra )
+{
+ /* Reservations are not supported on AIX5; this aborts the run. */
+ ML_(am_barf)("unimplemented: VG_(am_create_reservation)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/* Let SEG be an anonymous client mapping. This fn extends the
+ mapping by DELTA bytes, taking the space from a reservation section
+ which must be adjacent. If DELTA is positive, the segment is
+ extended forwards in the address space, and the reservation must be
+ the next one along. If DELTA is negative, the segment is extended
+ backwards in the address space and the reservation must be the
+ previous one. DELTA must be page aligned. abs(DELTA) must not
+ exceed the size of the reservation segment minus one page, that is,
+ the reservation segment after the operation must be at least one
+ page long. */
+
+Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
+ SSizeT delta )
+{
+ /* Not supported on AIX5 (no reservations); this aborts the run. */
+ ML_(am_barf)("unimplemented: "
+ "VG_(am_extend_into_adjacent_reservation_client)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/* --- --- --- resizing/move a mapping --- --- --- */
+
+/* Let SEG be a client mapping (anonymous or file). This fn extends
+ the mapping forwards only by DELTA bytes, and trashes whatever was
+ in the new area. Fails if SEG is not a single client mapping or if
+ the new area is not accessible to the client. Fails if DELTA is
+ not page aligned. *seg is invalid after a successful return. If
+ *need_discard is True after a successful return, the caller should
+ immediately discard translations from the new area. */
+
+Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
+ NSegment* seg, SizeT delta )
+{
+ /* Not supported on AIX5; this aborts the run. */
+ ML_(am_barf)("unimplemented: VG_(am_extend_map_client)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+/* Remap the old address range to the new address range. Fails if any
+ parameter is not page aligned, if the either size is zero, if any
+ wraparound is implied, if the old address range does not fall
+ entirely within a single segment, if the new address range overlaps
+ with the old one, or if the old address range is not a valid client
+ mapping. If *need_discard is True after a successful return, the
+ caller should immediately discard translations from both specified
+ address ranges. */
+
+Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
+ Addr old_addr, SizeT old_len,
+ Addr new_addr, SizeT new_len )
+{
+ /* Not supported on AIX5 (no mremap); this aborts the run. */
+ ML_(am_barf)("unimplemented: VG_(am_relocate_nooverlap_client)");
+ /*NOTREACHED*/
+ return False;
+}
+
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- A simple parser for /proc/<pid>/map on AIX5. ---*/
+/*--- Almost completely independent of the stuff above. The ---*/
+/*--- only function it 'exports' to the code above this comment ---*/
+/*--- is parse_procselfmaps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+#include <sys/procfs.h>
+/* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+
+
+/* Size of a smallish table used to read /proc/<pid>/map entries. */
+#define M_APROCMAP_BUF 100000
+
+/* static ... to keep it out of the stack frame. */
+static HChar procmap_buf[M_APROCMAP_BUF];
+
+/* Records length of /proc/<pid>/map read into procmap_buf. */
+static Int buf_n_tot;
+
+/* Helper fns. */
+
+/* Get the contents of /proc/<pid>/map into a static buffer. If
+ there's a syntax error, it won't fit, or other failure, just
+ abort. */
+
+static void read_procselfmap_into_buf ( void )
+{
+ Char fname[50];
+ Int n_chunk;
+ SysRes fd;
+
+ ML_(am_sprintf)( fname, "/proc/%d/map", ML_(am_getpid)() );
+
+ /* Read the initial memory mapping from the /proc filesystem. */
+ fd = ML_(am_open)( fname, VKI_O_RDONLY, 0 );
+ if (fd.isError)
+ ML_(am_barf)("can't open /proc/<pid>/map");
+
+ buf_n_tot = 0;
+ do {
+ n_chunk = ML_(am_read)( fd.res, &procmap_buf[buf_n_tot],
+ M_APROCMAP_BUF - buf_n_tot );
+ buf_n_tot += n_chunk;
+ } while ( n_chunk > 0 && buf_n_tot < M_APROCMAP_BUF );
+
+ ML_(am_close)(fd.res);
+
+ if (buf_n_tot >= M_APROCMAP_BUF-5)
+ ML_(am_barf_toolow)("M_APROCMAP_BUF");
+ if (buf_n_tot == 0)
+ ML_(am_barf)("I/O error on /proc/<pid>/map");
+
+ procmap_buf[buf_n_tot] = 0;
+}
+
+
+/* /proc/<pid>/map appears to give out a non-absolute path name for
+   the main executable.  Fortunately we can reliably identify the main
+   executable via the MA_MAINEXEC bit, and if we find the path is
+   non-absolute, replace it with /proc/<pid>/object/a.out instead.
+   AIX guarantees the latter is another name for the main
+   executable. */
+
+static HChar* kludge_exe_file_name ( HChar* file_name, prmap_t* map )
+{
+   static Int   my_pid = -1;
+   static HChar a_out_name[64];
+   Bool         is_main_exe;
+   if (file_name == NULL)
+      return NULL;
+   is_main_exe = (map->pr_mflags & MA_MAINEXEC) != 0;
+   if (is_main_exe && file_name[0] != '/') {
+      /* Our pid cannot change, so compute it once and cache it. */
+      if (my_pid == -1)
+         my_pid = ML_(am_getpid)();
+      ML_(am_sprintf)(a_out_name, "/proc/%d/object/a.out", my_pid);
+      file_name = a_out_name;
+   }
+   return file_name;
+}
+
+
+
+/* Parse /proc/<pid>/map, copying the entries in it into an
+ AixSegments structure. Returns a properly formed AixSegments, with
+ ASkMText/ASkMData entries, with sibling pointers set up, and
+ ASkFree everywhere else.
+*/
+static void parse_procselfmap ( /*OUT*/AixSegments* segs )
+{
+ UChar rr, ww, xx, mm, ss;
+ prmap_t* map;
+ UChar* file_name;
+ UChar* member_name;
+ Bool show_map;
+ Int off, i, j;
+ AixSegment s;
+
+ /* Flag bits that can legitimately appear in a live entry; used to
+ recognise the array terminator below. */
+ const UInt valid_pr_mflags
+ = MA_MAINEXEC | MA_KERNTEXT | MA_READ | MA_WRITE
+ | MA_EXEC | MA_SHARED | MA_BREAK | MA_STACK;
+
+ /* NOTE(review): init_AixSegments presumably resets 'used'; confirm
+ this preceding assignment is redundant. */
+ segs->used = 1;
+ init_AixSegments(segs);
+ aspacem_assert( sane_AixSegments(segs) );
+
+ read_procselfmap_into_buf();
+
+ if (0)
+ VG_(debugLog)(0, "procselfmaps", "got %d bytes\n", buf_n_tot);
+
+ off = 0;
+ while (True) {
+
+ /* stay sane .. */
+ if (off + sizeof(prmap_t) > buf_n_tot)
+ break;
+
+ map = (prmap_t*)&procmap_buf[off];
+ off += sizeof(prmap_t);
+
+ /* When should we stop reading the array?
+ /usr/include/sys/procfs.h says that "Array entries continue
+ until an entry with a pr_size field of 0 and invalid
+ pr_mflags occurs." It unhelpfully fails to define what
+ "invalid" means here. However, the following test _seems_ to
+ work. */
+ if (map->pr_size == 0
+ && (map->pr_mflags & valid_pr_mflags) == 0)
+ break;
+
+ /* Ok, keep going, but ignore any zero-sized mappings: */
+ if (map->pr_size == 0)
+ continue;
+
+ /* Decode the permission/attribute flags into one byte each. */
+ mm = (map->pr_mflags & MA_MAINEXEC) > 0;
+ rr = (map->pr_mflags & MA_READ) > 0;
+ ww = (map->pr_mflags & MA_WRITE) > 0;
+ xx = (map->pr_mflags & MA_EXEC) > 0;
+ ss = (map->pr_mflags & MA_SHARED) > 0;
+
+ /* The path name (and optional archive member name, stored
+ immediately after it) live elsewhere in the buffer, at
+ pr_pathoff. */
+ if (map->pr_pathoff > 0) {
+ file_name = &procmap_buf[map->pr_pathoff];
+ member_name = file_name + VG_(strlen)(file_name) + 1;
+ if (*member_name == 0)
+ member_name = NULL;
+ } else {
+ file_name = member_name = NULL;
+ }
+ file_name = kludge_exe_file_name( file_name, map );
+
+ /* Now file_name and member_name are NULL or ordinary strings.
+ Convert them to string-table resident strings. */
+ if (file_name)
+ file_name = add_to_strtab(file_name);
+ if (member_name)
+ member_name = add_to_strtab(member_name);
+
+ /* Create a suitable kind of segment. Initially we will start
+ with bogus sibling pointers, and allow ASkMData entries to
+ have file names, since we cannot assume anything about the
+ ordering of entries in the procmap file. In a second pass,
+ we will set up the sibling pointers based on those file
+ names, then remove the MData file names. */
+ init_AixSegment(&s);
+ show_map = False;
+ if (rr && (!ww) && xx) {
+ if (map->pr_size > 0) {
+ /* r-x segment; add bounds for a text area. */
+ s.kind = ASkMText;
+ s.start = (Addr)map->pr_vaddr;
+ s.end = (Addr)map->pr_vaddr + (Addr)map->pr_size - 1;
+ s.isMainExe = mm;
+ s.sibling = 0;
+ s.fname = file_name;
+ s.mname = member_name;
+ s.hasR = rr;
+ s.hasW = ww;
+ s.hasX = xx;
+ add_asegment(segs, &s);
+ }
+ }
+ else
+ if (rr && ww && (!xx)) {
+ if (map->pr_size > 0) {
+ /* rw- segment; add bounds for a data area. */
+ s.kind = ASkMData;
+ s.start = (Addr)map->pr_vaddr;
+ s.end = (Addr)map->pr_vaddr + (Addr)map->pr_size - 1;
+ /* Set a bogus non-zero sibling pointer, since sanity
+ checking will reject zero sibling pointers on MData.
+ It doesn't matter since the loops following this one
+ below fix up the sibling pointers. */
+ s.sibling = 1;
+ s.fname = file_name;
+ s.mname = member_name;
+ s.hasR = rr;
+ s.hasW = ww;
+ s.hasX = xx;
+ add_asegment(segs, &s);
+ }
+ }
+ else {
+ /* unclassifiable; we better complain. */
+ show_map = True;
+ VG_(debugLog)(0, "aspacem", "parse_procselfmap: unclassifiable:\n");
+ }
+
+ if (show_map)
+ VG_(debugLog)(1,"aspacem",
+ " %010llx-%010llx %c%c%c%c%c %s%s%s%s\n",
+ (ULong)map->pr_vaddr,
+ (ULong)map->pr_vaddr + (ULong)map->pr_size,
+ mm ? 'M' : '-',
+ rr ? 'r' : '-',
+ ww ? 'w' : '-',
+ xx ? 'x' : '-',
+ ss ? 'S' : '-',
+ file_name ? file_name : (UChar*)"(none)",
+ member_name ? "(" : "",
+ member_name ? member_name : (UChar*)"",
+ member_name ? ")" : ""
+ );
+
+ }
+
+ /* Set up sibling pointers. For each MData, find an MText with the
+ same file/member names, or complain. This is really ugly in
+ that it makes the process quadratic in the number of modules
+ mapped in, but I can't think of a (simple) better way. */
+
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind != ASkMData)
+ continue;
+ for (j = 0; j < segs->used; j++) {
+ if (segs->seg[j].kind == ASkMText
+ && segs->seg[j].fname == segs->seg[i].fname
+ && segs->seg[j].mname == segs->seg[i].mname)
+ break;
+ }
+ if (j == segs->used) {
+ /* A data segment with no text segment is a fatal
+ inconsistency in the map we read. */
+ VG_(debugLog)(0, "aspacem", "parse_procselfmap: "
+ "data segment with no associated text segment:\n");
+ VG_(debugLog)(0, "aspacem", "module = %s(%s)\n",
+ segs->seg[i].fname,
+ segs->seg[i].mname ? segs->seg[i].mname
+ : (UChar*)"(none)");
+ aspacem_assert(0);
+ }
+ aspacem_assert(j >= 0 && j < segs->used && j != i);
+ segs->seg[i].sibling = segs->seg[j].start;
+ }
+
+ /* (Almost) dually, for each MText, find an MData with same
+ file/member names, but don't complain if not present. */
+
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind != ASkMText)
+ continue;
+ for (j = 0; j < segs->used; j++) {
+ if (segs->seg[j].kind == ASkMData
+ && segs->seg[j].fname == segs->seg[i].fname
+ && segs->seg[j].mname == segs->seg[i].mname)
+ break;
+ }
+ if (j == segs->used) {
+ /* no corresponding MData found; harmless. */
+ } else {
+ aspacem_assert(j >= 0 && j < segs->used && j != i);
+ segs->seg[i].sibling = segs->seg[j].start;
+ }
+ }
+
+ /* Finally, get rid of fname/mname pointers on MDatas, so as to
+ adhere to the necessary representational invariants. */
+ for (i = 0; i < segs->used; i++) {
+ if (segs->seg[i].kind == ASkMData){
+ segs->seg[i].fname = segs->seg[i].mname = NULL;
+ }
+ }
+
+ aspacem_assert( sane_AixSegments(segs) );
+ if (0)
+ show_AixSegments(0, "as read from procmap", segs);
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/aspacemgr-common.c b/coregrind/m_aspacemgr/aspacemgr-common.c
new file mode 100644
index 0000000..ca4dc76
--- /dev/null
+++ b/coregrind/m_aspacemgr/aspacemgr-common.c
@@ -0,0 +1,386 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager: stuff common to all platforms ---*/
+/*--- ---*/
+/*--- m_aspacemgr-common.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+ ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
+ AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
+ ************************************************************* */
+
+#include "priv_aspacemgr.h"
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Stuff to make aspacem almost completely independent of ---*/
+/*--- the rest of Valgrind. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+//--------------------------------------------------------------
+// Simple assert and assert-like fns, which avoid dependence on
+// m_libcassert, and hence on the entire debug-info reader swamp
+
+/* Terminate the process immediately via raw syscalls, avoiding any
+ dependence on m_libcassert and the debug-info reader swamp. */
+__attribute__ ((noreturn))
+void ML_(am_exit)( Int status )
+{
+# if defined(VGO_linux)
+ (void)VG_(do_syscall1)(__NR_exit_group, status);
+# endif
+ (void)VG_(do_syscall1)(__NR_exit, status);
+ /* Why are we still alive here? */
+ /*NOTREACHED*/
+ /* Last-ditch attempts to stop: fault on a null write, then fail an
+ (always-false) assertion, satisfying 'noreturn'. */
+ *(volatile Int *)0 = 'x';
+ aspacem_assert(2+2 == 5);
+}
+
+/* Report a fatal aspacem error described by 'what' and exit. */
+void ML_(am_barf) ( HChar* what )
+{
+ VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
+ VG_(debugLog)(0, "aspacem", "Exiting now.\n");
+ ML_(am_exit)(1);
+}
+
+/* Fatal exit: the compile-time constant named by 'what' is too small
+ for this run; the user must enlarge it and rebuild. */
+void ML_(am_barf_toolow) ( HChar* what )
+{
+ VG_(debugLog)(0, "aspacem",
+ "Valgrind: FATAL: %s is too low.\n", what);
+ VG_(debugLog)(0, "aspacem", " Increase it and rebuild. "
+ "Exiting now.\n");
+ ML_(am_exit)(1);
+}
+
+/* Assertion-failure handler for aspacem_assert: print the failing
+ expression and its location, then exit. Never returns. */
+void ML_(am_assert_fail)( const HChar* expr,
+ const Char* file,
+ Int line,
+ const Char* fn )
+{
+ VG_(debugLog)(0, "aspacem",
+ "Valgrind: FATAL: aspacem assertion failed:\n");
+ VG_(debugLog)(0, "aspacem", " %s\n", expr);
+ VG_(debugLog)(0, "aspacem", " at %s:%d (%s)\n", file,line,fn);
+ VG_(debugLog)(0, "aspacem", "Exiting now.\n");
+ ML_(am_exit)(1);
+}
+
+/* Our process id, via a raw getpid syscall (which cannot fail). */
+Int ML_(am_getpid)( void )
+{
+ SysRes sres = VG_(do_syscall0)(__NR_getpid);
+ aspacem_assert(!sres.isError);
+ return sres.res;
+}
+
+
+//--------------------------------------------------------------
+// A simple sprintf implementation, so as to avoid dependence on
+// m_libcprint.
+
+/* debugLog_vprintf sink: 'p' really points at an HChar* cursor;
+   append character 'c' there and advance the cursor. */
+static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
+{
+   HChar** cursor = p;
+   **cursor = c;
+   (*cursor)++;
+}
+
+/* vsprintf workalike built on VG_(debugLog_vprintf); writes the
+   formatted output plus a trailing NUL into 'buf' and returns the
+   number of characters written (excluding the NUL). */
+static
+UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
+{
+   Int ret;
+   /* Declared HChar* to match the HChar** the sink callback expects;
+      the original mixed Char and HChar (aliases, but inconsistent). */
+   HChar *aspacem_sprintf_ptr = buf;
+
+   ret = VG_(debugLog_vprintf)
+            ( local_add_to_aspacem_sprintf_buf,
+              &aspacem_sprintf_ptr, format, vargs );
+   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);
+
+   return ret;
+}
+
+/* sprintf workalike (no dependence on m_libcprint).  Formats into
+   'buf', NUL-terminates it, and returns the character count written
+   (excluding the NUL). */
+UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
+{
+   UInt    n;
+   va_list args;
+
+   va_start(args, format);
+   n = local_vsprintf(buf, format, args);
+   va_end(args);
+
+   return n;
+}
+
+
+//--------------------------------------------------------------
+// Direct access to a handful of syscalls. This avoids dependence on
+// m_libc*. THESE DO NOT UPDATE ANY aspacem-internal DATA
+// STRUCTURES (SEGMENT LISTS). DO NOT USE THEM UNLESS YOU KNOW WHAT
+// YOU ARE DOING.
+
+/* --- Pertaining to mappings --- */
+
+/* Note: this is VG_, not ML_. */
+/* Raw mmap syscall; does NOT update the segment arrays. */
+SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
+ UInt flags, UInt fd, Off64T offset)
+{
+ SysRes res;
+ aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
+# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux)
+ /* mmap2 uses 4096 chunks even if actual page size is bigger. */
+ aspacem_assert((offset % 4096) == 0);
+ res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
+ prot, flags, fd, offset / 4096);
+# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
+ || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
+ prot, flags, fd, offset);
+# else
+# error Unknown platform
+# endif
+ return res;
+}
+
+/* Raw mprotect syscall; does NOT update the segment arrays. */
+static
+SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
+{
+ return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
+}
+
+/* Raw munmap syscall; does NOT update the segment arrays. */
+SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
+{
+ return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
+}
+
+/* In-place grow of a mapping via mremap (Linux only); does NOT
+ update the segment arrays. */
+SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
+ Addr old_addr,
+ SizeT old_len,
+ SizeT new_len
+ )
+{
+ /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
+ new_len, WITHOUT moving it. If it can't be extended in place,
+ fail. */
+# if defined(VGO_linux)
+ return VG_(do_syscall5)(
+ __NR_mremap,
+ old_addr, old_len, new_len,
+ 0/*flags, meaning: must be at old_addr, else FAIL */,
+ 0/*new_addr, is ignored*/
+ );
+# elif defined(VGO_aix5)
+ ML_(am_barf)("ML_(am_do_extend_mapping_NO_NOTIFY) on AIX5");
+ /* NOTREACHED, but gcc doesn't understand that */
+ return VG_(mk_SysRes_Error)(0);
+# else
+# error Unknown OS
+# endif
+}
+
+/* Move-and-resize of a mapping via mremap (Linux only); does NOT
+ update the segment arrays. */
+SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
+ Addr old_addr, Addr old_len,
+ Addr new_addr, Addr new_len
+ )
+{
+ /* Move the mapping old_addr .. old_addr+old_len-1 to the new
+ location and with the new length. Only needs to handle the case
+ where the two areas do not overlap, neither length is zero, and
+ all args are page aligned. */
+# if defined(VGO_linux)
+ return VG_(do_syscall5)(
+ __NR_mremap,
+ old_addr, old_len, new_len,
+ VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
+ new_addr
+ );
+# elif defined(VGO_aix5)
+ ML_(am_barf)("ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY) on AIX5");
+ /* NOTREACHED, but gcc doesn't understand that */
+ return VG_(mk_SysRes_Error)(0);
+# else
+# error Unknown OS
+# endif
+}
+
+/* --- Pertaining to files --- */
+
+/* Raw open(2); avoids any dependence on m_libcfile. */
+SysRes ML_(am_open) ( const Char* pathname, Int flags, Int mode )
+{
+   return VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
+}
+
+Int ML_(am_read) ( Int fd, void* buf, Int count)
+{
+ SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
+ return res.isError ? -1 : res.res;
+}
+
+/* Raw close(2); any error is ignored. */
+void ML_(am_close) ( Int fd )
+{
+ (void)VG_(do_syscall1)(__NR_close, fd);
+}
+
+/* Raw readlink(2).  Returns the number of bytes placed in 'buf', or
+   -1 on error.  Note readlink does not NUL-terminate the result. */
+Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
+{
+   SysRes sr;
+   sr = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+   if (sr.isError)
+      return -1;
+   return sr.res;
+}
+
+/* Get the dev, inode and mode info for a file descriptor, if
+ possible. Returns True on success. */
+Bool ML_(am_get_fd_d_i_m)( Int fd,
+ /*OUT*/ULong* dev,
+ /*OUT*/ULong* ino, /*OUT*/UInt* mode )
+{
+ SysRes res;
+ struct vki_stat buf;
+# if defined(VGO_linux) && defined(__NR_fstat64)
+ /* Try fstat64 first as it can cope with minor and major device
+ numbers outside the 0-255 range and it works properly for x86
+ binaries on amd64 systems where fstat seems to be broken. */
+ struct vki_stat64 buf64;
+ res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
+ if (!res.isError) {
+ *dev = (ULong)buf64.st_dev;
+ *ino = (ULong)buf64.st_ino;
+ *mode = (UInt) buf64.st_mode;
+ return True;
+ }
+# endif
+ /* Fall back to plain fstat. */
+ res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
+ if (!res.isError) {
+ *dev = (ULong)buf.st_dev;
+ *ino = (ULong)buf.st_ino;
+ *mode = (UInt) buf.st_mode;
+ return True;
+ }
+ return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Manage stacks for Valgrind itself. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Allocate and initialise a VgStack (anonymous valgrind space).
+   Protect the stack active area and the guard areas appropriately.
+   Returns NULL on failure, else the address of the bottom of the
+   stack.  On success, also sets *initial_sp to what the stack pointer
+   should be set to. */
+
+VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
+{
+   Int      szB;
+   SysRes   sres;
+   VgStack* stack;
+   UInt*    p;
+   Int      i;
+
+   /* Allocate the stack: low guard area, active area, high guard
+      area. */
+   szB = VG_STACK_GUARD_SZB
+         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;
+
+   sres = VG_(am_mmap_anon_float_valgrind)( szB );
+   if (sres.isError)
+      return NULL;
+
+   stack = (VgStack*)sres.res;
+
+   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));
+
+   /* Protect the guard areas.  CONSISTENCY FIX: address the low guard
+      as &stack->bytes[0] in both the mprotect and the notification;
+      the original mixed &stack[0] and &stack->bytes[0] (they denote
+      the same address, but the mismatch was confusing). */
+   sres = local_do_mprotect_NO_NOTIFY(
+             (Addr) &stack->bytes[0],
+             VG_STACK_GUARD_SZB, VKI_PROT_NONE
+          );
+   if (sres.isError) goto protect_failed;
+   VG_(am_notify_mprotect)(
+      (Addr) &stack->bytes[0],
+      VG_STACK_GUARD_SZB, VKI_PROT_NONE
+   );
+
+   sres = local_do_mprotect_NO_NOTIFY(
+             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
+             VG_STACK_GUARD_SZB, VKI_PROT_NONE
+          );
+   if (sres.isError) goto protect_failed;
+   VG_(am_notify_mprotect)(
+      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
+      VG_STACK_GUARD_SZB, VKI_PROT_NONE
+   );
+
+   /* Looks good.  Fill the active area with junk so we can later
+      tell how much got used. */
+   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
+   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
+      p[i] = 0xDEADBEEF;
+
+   /* Initial SP: top of the active area, minus a small redzone,
+      rounded down to 16-byte alignment. */
+   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
+   *initial_sp -= 8;
+   *initial_sp &= ~((Addr)0xF);
+
+   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
+                  (ULong)(Addr)stack, szB);
+   ML_(am_do_sanity_check)();
+   return stack;
+
+  protect_failed:
+   /* The stack was allocated, but we can't protect it.  Unmap it and
+      return NULL (failure). */
+   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
+   ML_(am_do_sanity_check)();
+   return NULL;
+}
+
+
+/* Figure out how many bytes of the stack's active area have not been
+   used, by counting how many of the 0xDEADBEEF fill words written at
+   allocation time are still intact from the bottom up.  Used for
+   estimating if we are close to overflowing the stack. */
+
+Int VG_(am_get_VgStack_unused_szB)( VgStack* stack )
+{
+   UInt* words = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
+   Int   n_unused = 0;
+
+   while (n_unused < VG_STACK_ACTIVE_SZB/sizeof(UInt)
+          && words[n_unused] == 0xDEADBEEF)
+      n_unused++;
+
+   return n_unused * sizeof(UInt);
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/aspacemgr-linux.c b/coregrind/m_aspacemgr/aspacemgr-linux.c
new file mode 100644
index 0000000..615d623
--- /dev/null
+++ b/coregrind/m_aspacemgr/aspacemgr-linux.c
@@ -0,0 +1,3230 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager: segment initialisation and ---*/
+/*--- tracking, stack operations ---*/
+/*--- ---*/
+/*--- Implementation for Linux m_aspacemgr-linux.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+ ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
+ AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
+ ************************************************************* */
+
+#include "priv_aspacemgr.h"
+
+
+/* Note: many of the exported functions implemented below are
+ described more fully in comments in pub_core_aspacemgr.h.
+*/
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Overview. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Purpose
+ ~~~~~~~
+ The purpose of the address space manager (aspacem) is:
+
+ (1) to record the disposition of all parts of the process' address
+ space at all times.
+
+ (2) to the extent that it can, influence layout in ways favourable
+ to our purposes.
+
+ It is important to appreciate that whilst it can and does attempt
+ to influence layout, and usually succeeds, it isn't possible to
+ impose absolute control: in the end, the kernel is the final
+ arbiter, and can always bounce our requests.
+
+ Strategy
+ ~~~~~~~~
+ The strategy is therefore as follows:
+
+ * Track ownership of mappings. Each one can belong either to
+ Valgrind or to the client.
+
+ * Try to place the client's fixed and hinted mappings at the
+ requested addresses. Fixed mappings are allowed anywhere except
+ in areas reserved by Valgrind; the client can trash its own
+ mappings if it wants. Hinted mappings are allowed providing they
+ fall entirely in free areas; if not, they will be placed by
+ aspacem in a free area.
+
+ * Anonymous mappings are allocated so as to keep Valgrind and
+ client areas widely separated when possible. If address space
+ runs low, then they may become intermingled: aspacem will attempt
+ to use all possible space. But under most circumstances lack of
+ address space is not a problem and so the areas will remain far
+ apart.
+
+ Searches for client space start at aspacem_cStart and will wrap
+ around the end of the available space if needed. Searches for
+ Valgrind space start at aspacem_vStart and will also wrap around.
+ Because aspacem_cStart is approximately at the start of the
+ available space and aspacem_vStart is approximately in the
+ middle, for the most part the client anonymous mappings will be
+ clustered towards the start of available space, and Valgrind ones
+ in the middle.
+
+ The available space is delimited by aspacem_minAddr and
+ aspacem_maxAddr. aspacem is flexible and can operate with these
+ at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
+ to some low-ish value at startup (64M) and aspacem_maxAddr is
+ derived from the stack pointer at system startup. This seems a
+ reliable way to establish the initial boundaries.
+
+ 64-bit Linux is similar except for the important detail that the
+ upper boundary is set to 32G. The reason is so that all
+ anonymous mappings (basically all client data areas) are kept
+ below 32G, since that is the maximum range that memcheck can
+ track shadow memory using a fast 2-level sparse array. It can go
+ beyond that but runs much more slowly. The 32G limit is
+ arbitrary and is trivially changed. So, with the current
+ settings, programs on 64-bit Linux will appear to run out of
+ address space and presumably fail at the 32G limit. Given the
+ 9/8 space overhead of Memcheck, that means you should be able to
+ memcheckify programs that use up to about 14G natively.
+
+ Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
+ anonymous mappings. The client can still do fixed and hinted maps
+ at any addresses provided they do not overlap Valgrind's segments.
+ This makes Valgrind able to load prelinked .so's at their requested
+ addresses on 64-bit platforms, even if they are very high (eg,
+ 112TB).
+
+ At startup, aspacem establishes the usable limits, and advises
+ m_main to place the client stack at the top of the range, which on
+ a 32-bit machine will be just below the real initial stack. One
+ effect of this is that self-hosting sort-of works, because an inner
+ valgrind will then place its client's stack just below its own
+ initial stack.
+
+ The segment array and segment kinds
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ The central data structure is the segment array (segments[0
+ .. nsegments_used-1]). This covers the entire address space in
+ order, giving account of every byte of it. Free spaces are
+ represented explicitly as this makes many operations simpler.
+ Mergeable adjacent segments are aggressively merged so as to create
+ a "normalised" representation (preen_nsegments).
+
+ There are 7 (mutually-exclusive) segment kinds, the meaning of
+ which is important:
+
+ SkFree: a free space, which may be allocated either to Valgrind (V)
+ or the client (C).
+
+ SkAnonC: an anonymous mapping belonging to C. For these, aspacem
+ tracks a boolean indicating whether or not is is part of the
+ client's heap area (can't remember why).
+
+ SkFileC: a file mapping belonging to C.
+
+ SkShmC: a shared memory segment belonging to C.
+
+ SkAnonV: an anonymous mapping belonging to V. These cover all V's
+ dynamic memory needs, including non-client malloc/free areas,
+ shadow memory, and the translation cache.
+
+ SkFileV: a file mapping belonging to V. As far as I know these are
+ only created transiently for the purposes of reading debug info.
+
+ SkResvn: a reservation segment.
+
+ These are mostly straightforward. Reservation segments have some
+ subtlety, however.
+
+ A reservation segment is unmapped from the kernel's point of view,
+ but is an area in which aspacem will not create anonymous maps
+ (either Vs or Cs). The idea is that we will try to keep it clear
+ when the choice to do so is ours. Reservation segments are
+ 'invisible' from the client's point of view: it may choose to park
+ a fixed mapping in the middle of one, and that's just tough -- we
+ can't do anything about that. From the client's perspective
+ reservations are semantically equivalent to (although
+ distinguishable from, if it makes enquiries) free areas.
+
+ Reservations are a primitive mechanism provided for whatever
+ purposes the rest of the system wants. Currently they are used to
+ reserve the expansion space into which a growdown stack is
+ expanded, and into which the data segment is extended. Note,
+ though, those uses are entirely external to this module, which only
+ supplies the primitives.
+
+ Reservations may be shrunk in order that an adjoining anonymous
+ mapping may be extended. This makes dataseg/stack expansion work.
+ A reservation may not be shrunk below one page.
+
+ The advise/notify concept
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+ All mmap-related calls must be routed via aspacem. Calling
+ sys_mmap directly from the rest of the system is very dangerous
+ because aspacem's data structures will become out of date.
+
+ The fundamental mode of operation of aspacem is to support client
+ mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
+
+ * m_syswrap intercepts the mmap call. It examines the parameters
+ and identifies the requested placement constraints. There are
+ three possibilities: no constraint (MAny), hinted (MHint, "I
+ prefer X but will accept anything"), and fixed (MFixed, "X or
+ nothing").
+
+ * This request is passed to VG_(am_get_advisory). This decides on
+ a placement as described in detail in Strategy above. It may
+ also indicate that the map should fail, because it would trash
+ one of Valgrind's areas, which would probably kill the system.
+
+ * Control returns to the wrapper. If VG_(am_get_advisory) has
+ declared that the map should fail, then it must be made to do so.
+ Usually, though, the request is considered acceptable, in which
+ case an "advised" address is supplied. The advised address
+ replaces the original address supplied by the client, and
+ MAP_FIXED is set.
+
+ Note at this point that although aspacem has been asked for
+ advice on where to place the mapping, no commitment has yet been
+ made by either it or the kernel.
+
+ * The adjusted request is handed off to the kernel.
+
+ * The kernel's result is examined. If the map succeeded, aspacem
+ is told of the outcome (VG_(am_notify_client_mmap)), so it can
+ update its records accordingly.
+
+ This then is the central advise-notify idiom for handling client
+ mmap/munmap/mprotect/shmat:
+
+ * ask aspacem for an advised placement (or a veto)
+
+ * if not vetoed, hand request to kernel, using the advised placement
+
+ * examine result, and if successful, notify aspacem of the result.
+
+ There are also many convenience functions, eg
+ VG_(am_mmap_anon_fixed_client), which do both phases entirely within
+ aspacem.
+
+ To debug all this, a sync-checker is provided. It reads
+ /proc/self/maps, compares what it sees with aspacem's records, and
+ complains if there is a difference. --sanity-level=3 runs it before
+ and after each syscall, which is a powerful, if slow way of finding
+ buggy syscall wrappers.
+
+ Loss of pointercheck
+ ~~~~~~~~~~~~~~~~~~~~
+ Up to and including Valgrind 2.4.1, x86 segmentation was used to
+ enforce seperation of V and C, so that wild writes by C could not
+ trash V. This got called "pointercheck". Unfortunately, the new
+ more flexible memory layout, plus the need to be portable across
+ different architectures, means doing this in hardware is no longer
+ viable, and doing it in software is expensive. So at the moment we
+ don't do it at all.
+*/
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The Address Space Manager's state. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* ------ start of STATE for the address-space manager ------ */
+
+/* Max number of segments we can track. */
+#define VG_N_SEGMENTS 5000
+
+/* Max number of segment file names we can track. */
+#define VG_N_SEGNAMES 1000
+
+/* Max length of a segment file name. */
+#define VG_MAX_SEGNAMELEN 1000
+
+
+typedef
+ struct {
+ Bool inUse;
+ Bool mark;
+ HChar fname[VG_MAX_SEGNAMELEN];
+ }
+ SegName;
+
+/* Filename table. _used is the high water mark; an entry is only
+ valid if its index >= 0, < _used, and its .inUse field == True.
+ The .mark field is used to garbage-collect dead entries.
+*/
+static SegName segnames[VG_N_SEGNAMES];
+static Int segnames_used = 0;
+
+
+/* Array [0 .. nsegments_used-1] of all mappings. */
+/* Sorted by .addr field. */
+/* I: len may not be zero. */
+/* I: overlapping segments are not allowed. */
+/* I: the segments cover the entire address space precisely. */
+/* Each segment can optionally hold an index into the filename table. */
+
+static NSegment nsegments[VG_N_SEGMENTS];
+static Int nsegments_used = 0;
+
+#define Addr_MIN ((Addr)0)
+#define Addr_MAX ((Addr)(-1ULL))
+
+/* Limits etc */
+
+// The smallest address that aspacem will try to allocate
+static Addr aspacem_minAddr = 0;
+
+// The largest address that aspacem will try to allocate
+static Addr aspacem_maxAddr = 0;
+
+// Where aspacem will start looking for client space
+static Addr aspacem_cStart = 0;
+
+// Where aspacem will start looking for Valgrind space
+static Addr aspacem_vStart = 0;
+
+
+#define AM_SANITY_CHECK \
+ do { \
+ if (VG_(clo_sanity_level >= 3)) \
+ aspacem_assert(VG_(am_do_sync_check) \
+ (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
+ } while (0)
+
+/* ------ end of STATE for the address-space manager ------ */
+
+/* ------ Forwards decls ------ */
+inline
+static Int find_nsegment_idx ( Addr a );
+
+static void parse_procselfmaps (
+ void (*record_mapping)( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename ),
+ void (*record_gap)( Addr addr, SizeT len )
+ );
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Functions for finding information about file descriptors. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Extract the device, inode and mode numbers for a fd. */
+static
+Bool get_inode_for_fd ( Int fd, /*OUT*/ULong* dev,
+ /*OUT*/ULong* ino, /*OUT*/UInt* mode )
+{
+ return ML_(am_get_fd_d_i_m)(fd, dev, ino, mode);
+}
+
+/* Given a file descriptor, attempt to deduce its filename. To do
+ this, we use /proc/self/fd/<FD>. If this doesn't point to a file,
+ or if it doesn't exist, we return False. */
+static
+Bool get_name_for_fd ( Int fd, /*OUT*/HChar* buf, Int nbuf )
+{
+ Int i;
+ HChar tmp[64];
+
+ ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
+ for (i = 0; i < nbuf; i++) buf[i] = 0;
+
+ if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
+ return True;
+ else
+ return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- SegName array management. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Searches the filename table to find an index for the given name.
+ If none is found, an index is allocated and the name stored. If no
+ space is available we just give up. If the string is too long to
+ store, return -1.
+*/
+static Int allocate_segname ( const HChar* name )
+{
+ Int i, j, len;
+
+ aspacem_assert(name);
+
+ if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
+
+ len = VG_(strlen)(name);
+ if (len >= VG_MAX_SEGNAMELEN-1) {
+ return -1;
+ }
+
+ /* first see if we already have the name. */
+ for (i = 0; i < segnames_used; i++) {
+ if (!segnames[i].inUse)
+ continue;
+ if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
+ return i;
+ }
+ }
+
+ /* no we don't. So look for a free slot. */
+ for (i = 0; i < segnames_used; i++)
+ if (!segnames[i].inUse)
+ break;
+
+ if (i == segnames_used) {
+ /* no free slots .. advance the high-water mark. */
+ if (segnames_used+1 < VG_N_SEGNAMES) {
+ i = segnames_used;
+ segnames_used++;
+ } else {
+ ML_(am_barf_toolow)("VG_N_SEGNAMES");
+ }
+ }
+
+ /* copy it in */
+ segnames[i].inUse = True;
+ for (j = 0; j < len; j++)
+ segnames[i].fname[j] = name[j];
+ aspacem_assert(len < VG_MAX_SEGNAMELEN);
+ segnames[i].fname[len] = 0;
+ return i;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Displaying the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+static HChar* show_SegKind ( SegKind sk )
+{
+ switch (sk) {
+ case SkFree: return " ";
+ case SkAnonC: return "anon";
+ case SkAnonV: return "ANON";
+ case SkFileC: return "file";
+ case SkFileV: return "FILE";
+ case SkShmC: return "shm ";
+ case SkResvn: return "RSVN";
+ default: return "????";
+ }
+}
+
+static HChar* show_ShrinkMode ( ShrinkMode sm )
+{
+ switch (sm) {
+ case SmLower: return "SmLower";
+ case SmUpper: return "SmUpper";
+ case SmFixed: return "SmFixed";
+ default: return "Sm?????";
+ }
+}
+
+static void show_Addr_concisely ( /*OUT*/HChar* buf, Addr aA )
+{
+ HChar* fmt;
+ ULong a = (ULong)aA;
+
+ if (a < 10*1000*1000ULL) {
+ fmt = "%7llu";
+ }
+ else if (a < 999999ULL * (1ULL<<20)) {
+ fmt = "%6llum";
+ a >>= 20;
+ }
+ else if (a < 999999ULL * (1ULL<<30)) {
+ fmt = "%6llug";
+ a >>= 30;
+ }
+ else if (a < 999999ULL * (1ULL<<40)) {
+ fmt = "%6llut";
+ a >>= 40;
+ }
+ else {
+ fmt = "%6llue";
+ a >>= 50;
+ }
+ ML_(am_sprintf)(buf, fmt, a);
+}
+
+
+/* Show full details of an NSegment */
+
+static void __attribute__ ((unused))
+ show_nsegment_full ( Int logLevel, NSegment* seg )
+{
+ HChar* name = "(none)";
+ if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
+ && segnames[seg->fnIdx].inUse
+ && segnames[seg->fnIdx].fname[0] != 0)
+ name = segnames[seg->fnIdx].fname;
+
+ VG_(debugLog)(logLevel, "aspacem",
+ "NSegment{%s, start=0x%llx, end=0x%llx, smode=%s, dev=%llu, "
+ "ino=%llu, offset=%lld, fnIdx=%d, hasR=%d, hasW=%d, hasX=%d, "
+ "hasT=%d, mark=%d, name=\"%s\"}\n",
+ show_SegKind(seg->kind),
+ (ULong)seg->start,
+ (ULong)seg->end,
+ show_ShrinkMode(seg->smode),
+ seg->dev, seg->ino, seg->offset, seg->fnIdx,
+ (Int)seg->hasR, (Int)seg->hasW, (Int)seg->hasX, (Int)seg->hasT,
+ (Int)seg->mark,
+ name
+ );
+}
+
+
+/* Show an NSegment in a user-friendly-ish way. */
+
+static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
+{
+ HChar len_buf[20];
+ ULong len = ((ULong)seg->end) - ((ULong)seg->start) + 1;
+ show_Addr_concisely(len_buf, len);
+
+ switch (seg->kind) {
+
+ case SkFree:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf
+ );
+ break;
+
+ case SkAnonC: case SkAnonV: case SkShmC:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf,
+ seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
+ seg->isCH ? 'H' : '-'
+ );
+ break;
+
+ case SkFileC: case SkFileV:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
+ "i=%-7lld o=%-7lld (%d)\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf,
+ seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
+ seg->isCH ? 'H' : '-',
+ seg->dev, seg->ino, seg->offset, seg->fnIdx
+ );
+ break;
+
+ case SkResvn:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
+ segNo, show_SegKind(seg->kind),
+ (ULong)seg->start, (ULong)seg->end, len_buf,
+ seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
+ seg->isCH ? 'H' : '-',
+ show_ShrinkMode(seg->smode)
+ );
+ break;
+
+ default:
+ VG_(debugLog)(
+ logLevel, "aspacem",
+ "%3d: ???? UNKNOWN SEGMENT KIND\n",
+ segNo
+ );
+ break;
+ }
+}
+
+/* Print out the segment array (debugging only!). */
+void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
+{
+ Int i;
+ VG_(debugLog)(logLevel, "aspacem",
+ "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
+ who, nsegments_used, segnames_used);
+ for (i = 0; i < segnames_used; i++) {
+ if (!segnames[i].inUse)
+ continue;
+ VG_(debugLog)(logLevel, "aspacem",
+ "(%2d) %s\n", i, segnames[i].fname);
+ }
+ for (i = 0; i < nsegments_used; i++)
+ show_nsegment( logLevel, i, &nsegments[i] );
+ VG_(debugLog)(logLevel, "aspacem",
+ ">>>\n");
+}
+
+
+/* Get the filename corresponding to this segment, if known and if it
+ has one. The returned name's storage cannot be assumed to be
+ persistent, so the caller should immediately copy the name
+ elsewhere. */
+HChar* VG_(am_get_filename)( NSegment const * seg )
+{
+ Int i;
+ aspacem_assert(seg);
+ i = seg->fnIdx;
+ if (i < 0 || i >= segnames_used || !segnames[i].inUse)
+ return NULL;
+ else
+ return &segnames[i].fname[0];
+}
+
+/* Collect up the start addresses of all non-free, non-resvn segments.
+ The interface is a bit strange in order to avoid potential
+ segment-creation races caused by dynamic allocation of the result
+ buffer *starts.
+
+ The function first computes how many entries in the result
+ buffer *starts will be needed. If this number <= nStarts,
+ they are placed in starts[0..], and the number is returned.
+ If nStarts is not large enough, nothing is written to
+ starts[0..], and the negation of the size is returned.
+
+ Correct use of this function may mean calling it multiple times in
+ order to establish a suitably-sized buffer. */
+
+Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
+{
+ Int i, j, nSegs;
+
+ /* don't pass dumbass arguments */
+ aspacem_assert(nStarts >= 0);
+
+ nSegs = 0;
+ for (i = 0; i < nsegments_used; i++) {
+ if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
+ continue;
+ nSegs++;
+ }
+
+ if (nSegs > nStarts) {
+ /* The buffer isn't big enough. Tell the caller how big it needs
+ to be. */
+ return -nSegs;
+ }
+
+ /* There's enough space. So write into the result buffer. */
+ aspacem_assert(nSegs <= nStarts);
+
+ j = 0;
+ for (i = 0; i < nsegments_used; i++) {
+ if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
+ continue;
+ starts[j] = nsegments[i].start;
+ j++;
+ }
+
+ aspacem_assert(j == nSegs); /* this should not fail */
+ return nSegs;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Sanity checking and preening of the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Check representational invariants for NSegments. */
+
+static Bool sane_NSegment ( NSegment* s )
+{
+ if (s == NULL) return False;
+
+ /* No zero sized segments and no wraparounds. */
+ if (s->start >= s->end) return False;
+
+ /* .mark is used for admin purposes only. */
+ if (s->mark) return False;
+
+ /* require page alignment */
+ if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
+ if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
+
+ switch (s->kind) {
+
+ case SkFree:
+ return
+ s->smode == SmFixed
+ && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
+ && !s->hasR && !s->hasW && !s->hasX && !s->hasT
+ && !s->isCH;
+
+ case SkAnonC: case SkAnonV: case SkShmC:
+ return
+ s->smode == SmFixed
+ && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
+ && (s->kind==SkAnonC ? True : !s->isCH);
+
+ case SkFileC: case SkFileV:
+ return
+ s->smode == SmFixed
+ && (s->fnIdx == -1 ||
+ (s->fnIdx >= 0 && s->fnIdx < segnames_used
+ && segnames[s->fnIdx].inUse))
+ && !s->isCH;
+
+ case SkResvn:
+ return
+ s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
+ && !s->hasR && !s->hasW && !s->hasX && !s->hasT
+ && !s->isCH;
+
+ default:
+ return False;
+ }
+}
+
+
+/* Try merging s2 into s1, if possible. If successful, s1 is
+ modified, and True is returned. Otherwise s1 is unchanged and
+ False is returned. */
+
+static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
+{
+ if (s1->kind != s2->kind)
+ return False;
+
+ if (s1->end+1 != s2->start)
+ return False;
+
+ /* reject cases which would cause wraparound */
+ if (s1->start > s2->end)
+ return False;
+
+ switch (s1->kind) {
+
+ case SkFree:
+ s1->end = s2->end;
+ return True;
+
+ case SkAnonC: case SkAnonV:
+ if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
+ && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
+ s1->end = s2->end;
+ s1->hasT |= s2->hasT;
+ return True;
+ }
+ break;
+
+ case SkFileC: case SkFileV:
+ if (s1->hasR == s2->hasR
+ && s1->hasW == s2->hasW && s1->hasX == s2->hasX
+ && s1->dev == s2->dev && s1->ino == s2->ino
+ && s2->offset == s1->offset
+ + ((ULong)s2->start) - ((ULong)s1->start) ) {
+ s1->end = s2->end;
+ s1->hasT |= s2->hasT;
+ return True;
+ }
+ break;
+
+ case SkShmC:
+ return False;
+
+ case SkResvn:
+ if (s1->smode == SmFixed && s2->smode == SmFixed) {
+ s1->end = s2->end;
+ return True;
+ }
+
+ default:
+ break;
+
+ }
+
+ return False;
+}
+
+
+/* Sanity-check and canonicalise the segment array (merge mergable
+ segments). Returns True if any segments were merged. */
+
+static Bool preen_nsegments ( void )
+{
+ Int i, j, r, w, nsegments_used_old = nsegments_used;
+
+ /* Pass 1: check the segment array covers the entire address space
+ exactly once, and also that each segment is sane. */
+ aspacem_assert(nsegments_used > 0);
+ aspacem_assert(nsegments[0].start == Addr_MIN);
+ aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
+
+ aspacem_assert(sane_NSegment(&nsegments[0]));
+ for (i = 1; i < nsegments_used; i++) {
+ aspacem_assert(sane_NSegment(&nsegments[i]));
+ aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
+ }
+
+ /* Pass 2: merge as much as possible, using
+ maybe_merge_segments. */
+ w = 0;
+ for (r = 1; r < nsegments_used; r++) {
+ if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
+ /* nothing */
+ } else {
+ w++;
+ if (w != r)
+ nsegments[w] = nsegments[r];
+ }
+ }
+ w++;
+ aspacem_assert(w > 0 && w <= nsegments_used);
+ nsegments_used = w;
+
+ /* Pass 3: free up unused string table slots */
+ /* clear mark bits */
+ for (i = 0; i < segnames_used; i++)
+ segnames[i].mark = False;
+ /* mark */
+ for (i = 0; i < nsegments_used; i++) {
+ j = nsegments[i].fnIdx;
+ aspacem_assert(j >= -1 && j < segnames_used);
+ if (j >= 0) {
+ aspacem_assert(segnames[j].inUse);
+ segnames[j].mark = True;
+ }
+ }
+ /* release */
+ for (i = 0; i < segnames_used; i++) {
+ if (segnames[i].mark == False) {
+ segnames[i].inUse = False;
+ segnames[i].fname[0] = 0;
+ }
+ }
+
+ return nsegments_used != nsegments_used_old;
+}
+
+
+/* Check the segment array corresponds with the kernel's view of
+ memory layout. sync_check_ok returns True if no anomalies were
+ found, else False. In the latter case the mismatching segments are
+ displayed.
+
+ The general idea is: we get the kernel to show us all its segments
+ and also the gaps in between. For each such interval, try and find
+ a sequence of appropriate intervals in our segment array which
+ cover or more than cover the kernel's interval, and which all have
+ suitable kinds/permissions etc.
+
+ Although any specific kernel interval is not matched exactly to a
+ valgrind interval or sequence thereof, eventually any disagreement
+ on mapping boundaries will be detected. This is because, if for
+ example valgrind's intervals cover a greater range than the current
+ kernel interval, it must be the case that a neighbouring free-space
+ interval belonging to valgrind cannot cover the neighbouring
+ free-space interval belonging to the kernel. So the disagreement
+ is detected.
+
+ In other words, we examine each kernel interval in turn, and check
+ we do not disagree over the range of that interval. Because all of
+ the address space is examined, any disagreements must eventually be
+ detected.
+*/
+
+static Bool sync_check_ok = False;
+
+static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename )
+{
+ Int iLo, iHi, i;
+ Bool sloppyXcheck;
+
+ /* If a problem has already been detected, don't continue comparing
+ segments, so as to avoid flooding the output with error
+ messages. */
+ if (!sync_check_ok)
+ return;
+
+ if (len == 0)
+ return;
+
+ /* The kernel should not give us wraparounds. */
+ aspacem_assert(addr <= addr + len - 1);
+
+ iLo = find_nsegment_idx( addr );
+ iHi = find_nsegment_idx( addr + len - 1 );
+
+ /* These 5 should be guaranteed by find_nsegment_idx. */
+ aspacem_assert(0 <= iLo && iLo < nsegments_used);
+ aspacem_assert(0 <= iHi && iHi < nsegments_used);
+ aspacem_assert(iLo <= iHi);
+ aspacem_assert(nsegments[iLo].start <= addr );
+ aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
+
+ /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
+ most recent NX-bit enabled CPUs) and so recent kernels attempt
+ to provide execute protection by placing all executable mappings
+ low down in the address space and then reducing the size of the
+ code segment to prevent code at higher addresses being executed.
+
+ These kernels report which mappings are really executable in
+ the /proc/self/maps output rather than mirroring what was asked
+ for when each mapping was created. In order to cope with this we
+ have a sloppyXcheck mode which we enable on x86 - in this mode we
+ allow the kernel to report execute permission when we weren't
+ expecting it but not vice versa. */
+# if defined(VGA_x86)
+ sloppyXcheck = True;
+# else
+ sloppyXcheck = False;
+# endif
+
+ /* NSegments iLo .. iHi inclusive should agree with the presented
+ data. */
+ for (i = iLo; i <= iHi; i++) {
+
+ Bool same, cmp_offsets, cmp_devino;
+ UInt seg_prot;
+
+ /* compare the kernel's offering against ours. */
+ same = nsegments[i].kind == SkAnonC
+ || nsegments[i].kind == SkAnonV
+ || nsegments[i].kind == SkFileC
+ || nsegments[i].kind == SkFileV
+ || nsegments[i].kind == SkShmC;
+
+ seg_prot = 0;
+ if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
+ if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
+ if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
+
+ cmp_offsets
+ = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
+
+ cmp_devino
+ = nsegments[i].dev != 0 || nsegments[i].ino != 0;
+
+ /* Consider other reasons to not compare dev/inode */
+
+ /* bproc does some godawful hack on /dev/zero at process
+ migration, which changes the name of it, and its dev & ino */
+ if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
+ cmp_devino = False;
+
+ /* hack apparently needed on MontaVista Linux */
+ if (filename && VG_(strstr)(filename, "/.lib-ro/"))
+ cmp_devino = False;
+
+ /* If we are doing sloppy execute permission checks then we
+ allow segment to have X permission when we weren't expecting
+ it (but not vice versa) so if the kernel reported execute
+ permission then pretend that this segment has it regardless
+ of what we were expecting. */
+ if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
+ seg_prot |= VKI_PROT_EXEC;
+ }
+
+ same = same
+ && seg_prot == prot
+ && (cmp_devino
+ ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
+ : True)
+ && (cmp_offsets
+ ? nsegments[i].start-nsegments[i].offset == addr-offset
+ : True);
+ if (!same) {
+ sync_check_ok = False;
+ VG_(debugLog)(
+ 0,"aspacem",
+ "sync_check_mapping_callback: segment mismatch: V's seg:\n");
+ show_nsegment_full( 0, &nsegments[i] );
+ goto show_kern_seg;
+ }
+ }
+
+ /* Looks harmless. Keep going. */
+ return;
+
+ show_kern_seg:
+ VG_(debugLog)(0,"aspacem",
+ "sync_check_mapping_callback: "
+ "segment mismatch: kernel's seg:\n");
+ VG_(debugLog)(0,"aspacem",
+ "start=0x%llx end=0x%llx prot=%u "
+ "dev=%llu ino=%llu offset=%lld name=\"%s\"\n",
+ (ULong)addr, ((ULong)addr) + ((ULong)len) - 1,
+ prot, dev, ino, offset,
+ filename ? (HChar*)filename : "(none)" );
+ return;
+}
+
+/* Callback for parse_procselfmaps: invoked once for each gap
+   (unmapped hole) the kernel reports.  Checks that every one of our
+   segments overlapping the gap is recorded as unused (SkFree) or as a
+   reservation (SkResvn); anything else is a layout discrepancy, which
+   is reported and causes sync_check_ok to be cleared. */
+static void sync_check_gap_callback ( Addr addr, SizeT len )
+{
+   Int iLo, iHi, i;
+
+   /* If a problem has already been detected, don't continue comparing
+      segments, so as to avoid flooding the output with error
+      messages. */
+   if (!sync_check_ok)
+      return;
+
+   if (len == 0)
+      return;
+
+   /* The kernel should not give us wraparounds. */
+   aspacem_assert(addr <= addr + len - 1); 
+
+   iLo = find_nsegment_idx( addr );
+   iHi = find_nsegment_idx( addr + len - 1 );
+
+   /* These 5 should be guaranteed by find_nsegment_idx. */
+   aspacem_assert(0 <= iLo && iLo < nsegments_used);
+   aspacem_assert(0 <= iHi && iHi < nsegments_used);
+   aspacem_assert(iLo <= iHi);
+   aspacem_assert(nsegments[iLo].start <= addr );
+   aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
+
+   /* NSegments iLo .. iHi inclusive should agree with the presented
+      data. */
+   for (i = iLo; i <= iHi; i++) {
+
+      Bool same;
+
+      /* compare the kernel's offering against ours. */
+      same = nsegments[i].kind == SkFree
+             || nsegments[i].kind == SkResvn;
+
+      if (!same) {
+         sync_check_ok = False;
+         /* Was wrongly reported as coming from
+            sync_check_mapping_callback; name this callback. */
+         VG_(debugLog)(
+            0,"aspacem",
+            "sync_check_gap_callback: segment mismatch: V's gap:\n");
+         show_nsegment_full( 0, &nsegments[i] );
+         goto show_kern_gap;
+      }
+   }
+
+   /* Looks harmless.  Keep going. */
+   return;
+
+  show_kern_gap:
+   VG_(debugLog)(0,"aspacem",
+      "sync_check_gap_callback: segment mismatch: kernel's gap:\n");
+   VG_(debugLog)(0,"aspacem",
+      "start=0x%llx end=0x%llx\n",
+      (ULong)addr, ((ULong)addr) + ((ULong)len) - 1 );
+   return;
+}
+
+
+/* Sanity check: check that Valgrind and the kernel agree on the
+   address space layout.  Prints offending segments and call point if
+   a discrepancy is detected, but does not abort the system.  Returned
+   Bool is False if a discrepancy was found. */
+
+Bool VG_(am_do_sync_check) ( const HChar* fn, 
+                             const HChar* file, Int line )
+{
+   /* Reset the module-global failure flag; the two callbacks below
+      compare each kernel mapping/gap against nsegments[] and clear
+      sync_check_ok on the first mismatch. */
+   sync_check_ok = True;
+   if (0)
+      VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
+   parse_procselfmaps( sync_check_mapping_callback,
+                       sync_check_gap_callback );
+   if (!sync_check_ok) {
+      VG_(debugLog)(0,"aspacem",
+                    "sync check at %s:%d (%s): FAILED\n",
+                    file, line, fn);
+      VG_(debugLog)(0,"aspacem", "\n");
+
+      /* Disabled debug aid: dump our view and the kernel's raw map. */
+# if 0
+      {
+         HChar buf[100];
+         VG_(am_show_nsegments)(0,"post syncheck failure");
+         VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
+         VG_(system)(buf);
+      }
+# endif
+
+   }
+   return sync_check_ok;
+}
+
+/* Hook to allow sanity checks to be done from aspacemgr-common.c. */
+void ML_(am_do_sanity_check)( void )
+{
+   /* AM_SANITY_CHECK is this module's standard consistency check over
+      the segment array. */
+   AM_SANITY_CHECK;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Low level access / modification of the segment array. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Binary search the interval array for a given address. Since the
+ array covers the entire address space the search cannot fail. The
+ _WRK function does the real work. Its caller (just below) caches
+ the results thereof, to save time. With N_CACHE of 63 we get a hit
+ rate exceeding 90% when running OpenOffice.
+
+ Re ">> 12", it doesn't matter that the page size of some targets
+ might be different from 12. Really "(a >> 12) % N_CACHE" is merely
+ a hash function, and the actual cache entry is always validated
+ correctly against the selected cache entry before use.
+*/
+/* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
+__attribute__((noinline))
+static Int find_nsegment_idx_WRK ( Addr a )
+{
+   /* Plain binary search over nsegments[0 .. nsegments_used-1].  The
+      segments partition the entire address space, so the search must
+      succeed; running off the end indicates internal corruption. */
+   Int lo = 0;
+   Int hi = nsegments_used-1;
+   while (lo <= hi) {
+      Int  mid   = lo + (hi - lo) / 2;
+      Addr midLo = nsegments[mid].start;
+      Addr midHi = nsegments[mid].end;
+      if (a < midLo) {
+         hi = mid-1;
+      } else if (a > midHi) {
+         lo = mid+1;
+      } else {
+         aspacem_assert(midLo <= a && a <= midHi);
+         aspacem_assert(0 <= mid && mid < nsegments_used);
+         return mid;
+      }
+   }
+   /* Not found.  This can't happen. */
+   ML_(am_barf)("find_nsegment_idx: not found");
+   return -1; /*NOTREACHED*/
+}
+
+inline static Int find_nsegment_idx ( Addr a )
+{
+# define N_CACHE 63
+   /* Direct-mapped cache in front of find_nsegment_idx_WRK, keyed by
+      page number (a >> 12).  Each slot holds the page tag and the
+      segment index last found for that page. */
+   static Addr cache_pageno[N_CACHE];
+   static Int cache_segidx[N_CACHE];
+   static Bool cache_inited = False;
+
+   /* n_q = total queries, n_m = cache misses; stats only, reported by
+      the (normally disabled) debugLog below. */
+   static UWord n_q = 0;
+   static UWord n_m = 0;
+
+   UWord ix;
+
+   if (LIKELY(cache_inited)) {
+      /* do nothing */
+   } else {
+      /* One-time lazy initialisation: mark all slots invalid. */
+      for (ix = 0; ix < N_CACHE; ix++) {
+         cache_pageno[ix] = 0;
+         cache_segidx[ix] = -1;
+      }
+      cache_inited = True;
+   }
+
+   ix = (a >> 12) % N_CACHE;
+
+   n_q++;
+   if (0 && 0 == (n_q & 0xFFFF))
+      VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
+
+   /* A hit requires both a matching page tag and that the cached
+      index still covers 'a' -- the array may have been reshuffled by
+      segment splits/merges since the entry was written. */
+   if ((a >> 12) == cache_pageno[ix]
+       && cache_segidx[ix] >= 0
+       && cache_segidx[ix] < nsegments_used
+       && nsegments[cache_segidx[ix]].start <= a
+       && a <= nsegments[cache_segidx[ix]].end) {
+      /* hit */
+      /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
+      return cache_segidx[ix];
+   }
+   /* miss */
+   n_m++;
+   cache_segidx[ix] = find_nsegment_idx_WRK(a);
+   cache_pageno[ix] = a >> 12;
+   return cache_segidx[ix];
+# undef N_CACHE
+}
+
+
+
+/* Finds the segment containing 'a'.  Only returns file/anon/resvn
+   segments.  This returns a 'NSegment const *' - a pointer to 
+   readonly data. */
+NSegment const * VG_(am_find_nsegment) ( Addr a )
+{
+   /* Look up the unique segment covering 'a', sanity-check the
+      result, and hide free areas from the caller. */
+   Int idx = find_nsegment_idx(a);
+   aspacem_assert(idx >= 0 && idx < nsegments_used);
+   aspacem_assert(nsegments[idx].start <= a && a <= nsegments[idx].end);
+   return nsegments[idx].kind == SkFree ? NULL : &nsegments[idx];
+}
+
+
+/* Given a pointer to a seg, tries to figure out which one it is in
+   nsegments[..].  Very paranoid. */
+static Int segAddr_to_index ( NSegment* seg )
+{
+   /* Reject pointers outside the array; otherwise recover the index
+      from the byte offset and verify that it round-trips back to the
+      same pointer. */
+   if (seg >= &nsegments[0] && seg < &nsegments[nsegments_used]) {
+      Int idx = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
+      if (idx >= 0 && idx < nsegments_used && seg == &nsegments[idx])
+         return idx;
+   }
+   return -1;
+}
+
+
+/* Find the next segment along from 'here', if it is a file/anon/resvn
+   segment. */
+NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
+{
+   /* Map 'here' back to its index, step one slot in the requested
+      direction, and return the neighbour only when it is a mapping or
+      reservation kind. */
+   Int idx = segAddr_to_index(here);
+   if (idx < 0 || idx >= nsegments_used)
+      return NULL;
+   idx += fwds ? 1 : -1;
+   if (idx < 0 || idx >= nsegments_used)
+      return NULL;
+   switch (nsegments[idx].kind) {
+      case SkFileC: case SkFileV: case SkShmC:
+      case SkAnonC: case SkAnonV: case SkResvn:
+         return &nsegments[idx];
+      default:
+         return NULL;
+   }
+}
+
+
+/* Trivial fn: return the total amount of space in anonymous mappings,
+   both for V and the client.  Is used for printing stats in
+   out-of-memory messages. */
+ULong VG_(am_get_anonsize_total)( void )
+{
+   ULong sum = 0;
+   Int j;
+   /* Accumulate the sizes of all anonymous (SkAnonC/SkAnonV)
+      segments; each segment spans end - start + 1 bytes. */
+   for (j = 0; j < nsegments_used; j++) {
+      if (nsegments[j].kind == SkAnonC || nsegments[j].kind == SkAnonV)
+         sum += (ULong)nsegments[j].end - (ULong)nsegments[j].start + 1ULL;
+   }
+   return sum;
+}
+
+
+/* Test if a piece of memory is addressable by the client with at
+   least the "prot" protection permissions by examining the underlying
+   segments.  If freeOk is True then SkFree areas are also allowed.
+*/
+static
+Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
+{
+   Int i, iLo, iHi;
+   Bool needR, needW, needX;
+
+   if (len == 0)
+      return True; /* somewhat dubious case */
+   if (start + len < start)
+      return False; /* reject wraparounds */
+
+   /* Decompose the required permissions. */
+   needR = toBool(prot & VKI_PROT_READ);
+   needW = toBool(prot & VKI_PROT_WRITE);
+   needX = toBool(prot & VKI_PROT_EXEC);
+
+   iLo = find_nsegment_idx(start);
+   aspacem_assert(start >= nsegments[iLo].start);
+
+   if (start+len-1 <= nsegments[iLo].end) {
+      /* This is a speedup hack which avoids calling find_nsegment_idx
+         a second time when possible.  It is always correct to just
+         use the "else" clause below, but is_valid_for_client is
+         called a lot by the leak checker, so avoiding pointless calls
+         to find_nsegment_idx, which can be expensive, is helpful. */
+      iHi = iLo;
+   } else {
+      iHi = find_nsegment_idx(start + len - 1);
+   }
+
+   /* Every segment overlapping the range must be client-visible (or
+      free/reserved, when freeOk) and must carry all the permissions
+      requested. */
+   for (i = iLo; i <= iHi; i++) {
+      if ( (nsegments[i].kind == SkFileC 
+            || nsegments[i].kind == SkAnonC
+            || nsegments[i].kind == SkShmC
+            || (nsegments[i].kind == SkFree && freeOk)
+            || (nsegments[i].kind == SkResvn && freeOk))
+           && (needR ? nsegments[i].hasR : True)
+           && (needW ? nsegments[i].hasW : True)
+           && (needX ? nsegments[i].hasX : True) ) {
+         /* ok */
+      } else {
+         return False;
+      }
+   }
+   return True;
+}
+
+/* Test if a piece of memory is addressable by the client with at
+   least the "prot" protection permissions by examining the underlying
+   segments. */
+Bool VG_(am_is_valid_for_client)( Addr start, SizeT len, 
+                                  UInt prot )
+{
+   /* Strict variant: free and reserved areas do not count. */
+   return is_valid_for_client( start, len, prot, False/*free not OK*/ );
+}
+
+/* Variant of VG_(am_is_valid_for_client) which allows free areas to
+   be consider part of the client's addressable space.  It also
+   considers reservations to be allowable, since from the client's
+   point of view they don't exist. */
+Bool VG_(am_is_valid_for_client_or_free_or_resvn)
+   ( Addr start, SizeT len, UInt prot )
+{
+   /* freeOk=True also admits SkResvn areas -- see is_valid_for_client. */
+   return is_valid_for_client( start, len, prot, True/*free is OK*/ );
+}
+
+
+/* Test if a piece of memory is addressable by valgrind with at least
+   PROT_NONE protection permissions by examining the underlying
+   segments. */
+static Bool is_valid_for_valgrind( Addr start, SizeT len )
+{
+   Int j, jLo, jHi;
+
+   /* Empty ranges are (dubiously) fine; wraparounds never are. */
+   if (len == 0)
+      return True;
+   if (start + len < start)
+      return False;
+
+   /* Every segment overlapping the range must be a V-owned mapping. */
+   jLo = find_nsegment_idx(start);
+   jHi = find_nsegment_idx(start + len - 1);
+   for (j = jLo; j <= jHi; j++) {
+      if (nsegments[j].kind != SkFileV && nsegments[j].kind != SkAnonV)
+         return False;
+   }
+   return True;
+}
+
+
+/* Returns True if any part of the address range is marked as having
+   translations made from it.  This is used to determine when to
+   discard code, so if in doubt return True. */
+
+static Bool any_Ts_in_range ( Addr start, SizeT len )
+{
+   Int j, jLo, jHi;
+   aspacem_assert(len > 0);
+   aspacem_assert(start + len > start);
+   /* Scan every segment overlapping [start, start+len-1] for the
+      hasT (has-translations) flag. */
+   jLo = find_nsegment_idx(start);
+   jHi = find_nsegment_idx(start + len - 1);
+   for (j = jLo; j <= jHi; j++)
+      if (nsegments[j].hasT)
+         return True;
+   return False;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Modifying the segment array, and constructing segments. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Split the segment containing 'a' into two, so that 'a' is
+   guaranteed to be the start of a new segment.  If 'a' is already the
+   start of a segment, do nothing. */
+
+static void split_nsegment_at ( Addr a )
+{
+   Int i, j;
+
+   aspacem_assert(a > 0);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
+ 
+   i = find_nsegment_idx(a);
+   aspacem_assert(i >= 0 && i < nsegments_used);
+
+   if (nsegments[i].start == a)
+      /* 'a' is already the start point of a segment, so nothing to be
+         done. */
+      return;
+
+   /* else we have to slide the segments upwards to make a hole */
+   if (nsegments_used >= VG_N_SEGMENTS)
+      ML_(am_barf_toolow)("VG_N_SEGMENTS");
+   for (j = nsegments_used-1; j > i; j--)
+      nsegments[j+1] = nsegments[j];
+   nsegments_used++;
+
+   /* Duplicate segment i into the freed slot, then trim the two
+      halves around 'a': [start, a-1] stays at i, [a, end] at i+1. */
+   nsegments[i+1] = nsegments[i];
+   nsegments[i+1].start = a;
+   nsegments[i].end = a-1;
+
+   /* For file mappings, the upper half begins further into the file,
+      so advance its file offset by the size of the lower half. */
+   if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
+      nsegments[i+1].offset 
+         += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
+
+   aspacem_assert(sane_NSegment(&nsegments[i]));
+   aspacem_assert(sane_NSegment(&nsegments[i+1]));
+}
+
+
+/* Do the minimum amount of segment splitting necessary to ensure that
+   sLo is the first address denoted by some segment and sHi is the
+   highest address denoted by some other segment.  Returns the indices
+   of the lowest and highest segments in the range. */
+
+static 
+void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
+                                 /*OUT*/Int* iLo,
+                                 /*OUT*/Int* iHi )
+{
+   aspacem_assert(sLo < sHi);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
+
+   /* sLo == 0 is necessarily already a segment start, and when
+      sHi == Addr_MAX there is no following address to split at (the
+      'sHi < sHi+1' test guards against wraparound). */
+   if (sLo > 0)
+      split_nsegment_at(sLo);
+   if (sHi < sHi+1)
+      split_nsegment_at(sHi+1);
+
+   *iLo = find_nsegment_idx(sLo);
+   *iHi = find_nsegment_idx(sHi);
+   aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
+   aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
+   aspacem_assert(*iLo <= *iHi);
+   aspacem_assert(nsegments[*iLo].start == sLo);
+   aspacem_assert(nsegments[*iHi].end == sHi);
+   /* Not that I'm overly paranoid or anything, definitely not :-) */
+}
+
+
+/* Add SEG to the collection, deleting/truncating any it overlaps.
+   This deals with all the tricky cases of splitting up segments as
+   needed. */
+
+static void add_segment ( NSegment* seg )
+{
+   Int i, iLo, iHi, delta;
+   Bool segment_is_sane;
+
+   Addr sStart = seg->start;
+   Addr sEnd = seg->end;
+
+   aspacem_assert(sStart <= sEnd);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
+
+   segment_is_sane = sane_NSegment(seg);
+   if (!segment_is_sane) show_nsegment_full(0,seg);
+   aspacem_assert(segment_is_sane);
+
+   /* Force segment boundaries exactly at sStart and sEnd+1, so the
+      incoming segment replaces a whole number of existing ones. */
+   split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
+
+   /* Now iLo .. iHi inclusive is the range of segment indices which
+      seg will replace.  If we're replacing more than one segment,
+      slide those above the range down to fill the hole. */
+   delta = iHi - iLo;
+   aspacem_assert(delta >= 0);
+   if (delta > 0) {
+      for (i = iLo; i < nsegments_used-delta; i++)
+         nsegments[i] = nsegments[i+delta];
+      nsegments_used -= delta;
+   }
+
+   nsegments[iLo] = *seg;
+
+   /* The edit may have created mergeable neighbours; re-preen to
+      merge them.  The result (index) is not needed here. */
+   (void)preen_nsegments();
+   if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
+}
+
+
+/* Clear out an NSegment record. */
+
+static void init_nsegment ( /*OUT*/NSegment* seg )
+{
+   /* Reset every field to the default "free, unmapped, anonymous,
+      unnamed" state. */
+   seg->kind   = SkFree;
+   seg->start  = 0;
+   seg->end    = 0;
+   seg->smode  = SmFixed;
+   seg->dev    = 0;
+   seg->ino    = 0;
+   seg->mode   = 0;
+   seg->offset = 0;
+   seg->fnIdx  = -1;
+   seg->hasR   = False;
+   seg->hasW   = False;
+   seg->hasX   = False;
+   seg->hasT   = False;
+   seg->isCH   = False;
+   seg->mark   = False;
+}
+
+/* Make an NSegment which holds a reservation. */
+
+static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
+{
+   /* The range must be non-empty and page-aligned at both edges. */
+   aspacem_assert(start < end);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
+   init_nsegment(seg);
+   seg->start = start;
+   seg->end   = end;
+   seg->kind  = SkResvn;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Startup, including reading /proc/self/maps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* parse_procselfmaps callback used at startup: records one mapping
+   from the initial /proc/self/maps scan as a V-owned segment. */
+static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
+                                 ULong dev, ULong ino, Off64T offset, 
+                                 const UChar* filename )
+{
+   NSegment seg;
+   init_nsegment( &seg );
+   seg.start = addr;
+   seg.end = addr+len-1;
+   seg.dev = dev;
+   seg.ino = ino;
+   seg.offset = offset;
+   seg.hasR = toBool(prot & VKI_PROT_READ);
+   seg.hasW = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX = toBool(prot & VKI_PROT_EXEC);
+   seg.hasT = False;
+
+   /* Don't use the presence of a filename to decide if a segment in
+      the initial /proc/self/maps to decide if the segment is an AnonV
+      or FileV segment as some systems don't report the filename. Use
+      the device and inode numbers instead. Fixes bug #124528. */
+   seg.kind = SkAnonV;
+   if (dev != 0 && ino != 0) 
+      seg.kind = SkFileV;
+   if (filename)
+      seg.fnIdx = allocate_segname( filename );
+
+   if (0) show_nsegment( 2,0, &seg );
+   add_segment( &seg );
+}
+
+/* Initialise the address space manager, setting up the initial
+   segment list, and reading /proc/self/maps into it.  This must
+   be called before any other function.
+
+   Takes a pointer to the SP at the time V gained control.  This is
+   taken to be the highest usable address (more or less).  Based on
+   that (and general consultation of tea leaves, etc) return a
+   suggested end address for the client's stack. */
+
+Addr VG_(am_startup) ( Addr sp_at_startup )
+{
+   NSegment seg;
+   Addr suggested_clstack_top;
+
+   /* This module assumes Addr/SizeT/etc are exactly word-sized. */
+   aspacem_assert(sizeof(Word) == sizeof(void*));
+   aspacem_assert(sizeof(Addr) == sizeof(void*));
+   aspacem_assert(sizeof(SizeT) == sizeof(void*));
+   aspacem_assert(sizeof(SSizeT) == sizeof(void*));
+
+   /* Check that we can store the largest imaginable dev, ino and
+      offset numbers in an NSegment. */
+   aspacem_assert(sizeof(seg.dev) == 8);
+   aspacem_assert(sizeof(seg.ino) == 8);
+   aspacem_assert(sizeof(seg.offset) == 8);
+   aspacem_assert(sizeof(seg.mode) == 4);
+
+   /* Add a single interval covering the entire address space. */
+   init_nsegment(&seg);
+   seg.kind = SkFree;
+   seg.start = Addr_MIN;
+   seg.end = Addr_MAX;
+   nsegments[0] = seg;
+   nsegments_used = 1;
+
+   /* Establish address limits and block out unusable parts
+      accordingly. */
+
+   VG_(debugLog)(2, "aspacem", 
+                 "        sp_at_startup = 0x%010llx (supplied)\n", 
+                 (ULong)sp_at_startup );
+
+   aspacem_minAddr = (Addr) 0x04000000; // 64M
+
+# if VG_WORDSIZE == 8
+     aspacem_maxAddr = (Addr)0x800000000 - 1; // 32G
+#    ifdef ENABLE_INNER
+     /* Inner Valgrind: don't go above the outer's stack pointer. */
+     { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
+       if (aspacem_maxAddr > cse)
+          aspacem_maxAddr = cse;
+     }
+#    endif
+# else
+     aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
+# endif
+
+   /* Client area starts at minAddr; V's own area starts halfway up
+      the usable range (rounded to a page). */
+   aspacem_cStart = aspacem_minAddr; // 64M
+   aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
+# ifdef ENABLE_INNER
+   aspacem_vStart -= 0x10000000; // 256M
+# endif
+
+   /* Place the suggested stack top 16M below the hard limit; the
+      +VKI_PAGE_SIZE keeps suggested_clstack_top+1 page-aligned, as
+      asserted below. */
+   suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
+                           + VKI_PAGE_SIZE;
+
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
+
+   VG_(debugLog)(2, "aspacem", 
+                 "              minAddr = 0x%010llx (computed)\n", 
+                 (ULong)aspacem_minAddr);
+   VG_(debugLog)(2, "aspacem", 
+                 "              maxAddr = 0x%010llx (computed)\n", 
+                 (ULong)aspacem_maxAddr);
+   VG_(debugLog)(2, "aspacem", 
+                 "               cStart = 0x%010llx (computed)\n", 
+                 (ULong)aspacem_cStart);
+   VG_(debugLog)(2, "aspacem", 
+                 "               vStart = 0x%010llx (computed)\n", 
+                 (ULong)aspacem_vStart);
+   VG_(debugLog)(2, "aspacem", 
+                 "suggested_clstack_top = 0x%010llx (computed)\n", 
+                 (ULong)suggested_clstack_top);
+
+   /* Reserve the unusable space below the client area and above the
+      usable maximum, so the advisor never allocates there. */
+   if (aspacem_cStart > Addr_MIN) {
+      init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
+      add_segment(&seg);
+   }
+   if (aspacem_maxAddr < Addr_MAX) {
+      init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
+      add_segment(&seg);
+   }
+
+   /* Create a 1-page reservation at the notional initial
+      client/valgrind boundary.  This isn't strictly necessary, but
+      because the advisor does first-fit and starts searches for
+      valgrind allocations at the boundary, this is kind of necessary
+      in order to get it to start allocating in the right place. */
+   init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1);
+   add_segment(&seg);
+
+   VG_(am_show_nsegments)(2, "Initial layout");
+
+   VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
+   parse_procselfmaps( read_maps_callback, NULL );
+
+   VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
+
+   AM_SANITY_CHECK;
+   return suggested_clstack_top;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- The core query-notify mechanism. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Query aspacem to ask where a mapping should go. */
+
+Addr VG_(am_get_advisory) ( MapRequest* req, 
+                            Bool forClient, 
+                            /*OUT*/Bool* ok )
+{
+   /* This function implements allocation policy.
+
+      The nature of the allocation request is determined by req, which
+      specifies the start and length of the request and indicates
+      whether the start address is mandatory, a hint, or irrelevant,
+      and by forClient, which says whether this is for the client or
+      for V. 
+
+      Return values: the request can be vetoed (*ok is set to False),
+      in which case the caller should not attempt to proceed with
+      making the mapping.  Otherwise, *ok is set to True, the caller
+      may proceed, and the preferred address at which the mapping
+      should happen is returned.
+
+      Note that this is an advisory system only: the kernel can in
+      fact do whatever it likes as far as placement goes, and we have
+      no absolute control over it.
+
+      Allocations will never be granted in a reserved area.
+
+      The Default Policy is:
+
+        Search the address space for two free intervals: one of them
+        big enough to contain the request without regard to the
+        specified address (viz, as if it was a floating request) and
+        the other being able to contain the request at the specified
+        address (viz, as if were a fixed request).  Then, depending on
+        the outcome of the search and the kind of request made, decide
+        whether the request is allowable and what address to advise.
+
+      The Default Policy is overriden by Policy Exception #1:
+
+        If the request is for a fixed client map, we are prepared to
+        grant it providing all areas inside the request are either
+        free, reservations, or mappings belonging to the client.  In
+        other words we are prepared to let the client trash its own
+        mappings if it wants to.
+
+      The Default Policy is overriden by Policy Exception #2:
+
+        If the request is for a hinted client map, we are prepared to
+        grant it providing all areas inside the request are either
+        free or reservations.  In other words we are prepared to let 
+        the client have a hinted mapping anywhere it likes provided
+        it does not trash either any of its own mappings or any of 
+        valgrind's mappings.
+   */
+   Int i, j;
+   Addr holeStart, holeEnd, holeLen;
+   Bool fixed_not_required;
+
+   Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
+
+   /* For MAny the start address is irrelevant, so normalise it to 0. */
+   Addr reqStart = req->rkind==MAny ? 0 : req->start;
+   Addr reqEnd = reqStart + req->len - 1;
+   Addr reqLen = req->len;
+
+   /* These hold indices for segments found during search, or -1 if not
+      found. */
+   Int floatIdx = -1;
+   Int fixedIdx = -1;
+
+   aspacem_assert(nsegments_used > 0);
+
+   if (0) {
+      VG_(am_show_nsegments)(0,"getAdvisory");
+      VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n", 
+                    (ULong)req->start, (ULong)req->len);
+   }
+
+   /* Reject zero-length requests */
+   if (req->len == 0) {
+      *ok = False;
+      return 0;
+   }
+
+   /* Reject wraparounds */
+   if ((req->rkind==MFixed || req->rkind==MHint)
+       && req->start + req->len < req->start) {
+      *ok = False;
+      return 0;
+   }
+
+   /* ------ Implement Policy Exception #1 ------ */
+
+   if (forClient && req->rkind == MFixed) {
+      Int iLo = find_nsegment_idx(reqStart);
+      Int iHi = find_nsegment_idx(reqEnd);
+      Bool allow = True;
+      /* Grant iff every overlapped segment is free, reserved, or a
+         client mapping (which the client may trash). */
+      for (i = iLo; i <= iHi; i++) {
+         if (nsegments[i].kind == SkFree
+             || nsegments[i].kind == SkFileC
+             || nsegments[i].kind == SkAnonC
+             || nsegments[i].kind == SkShmC
+             || nsegments[i].kind == SkResvn) {
+            /* ok */
+         } else {
+            allow = False;
+            break;
+         }
+      }
+      if (allow) {
+         /* Acceptable.  Granted. */
+         *ok = True;
+         return reqStart;
+      }
+      /* Not acceptable.  Fail. */
+      *ok = False;
+      return 0;
+   }
+
+   /* ------ Implement Policy Exception #2 ------ */
+
+   if (forClient && req->rkind == MHint) {
+      Int iLo = find_nsegment_idx(reqStart);
+      Int iHi = find_nsegment_idx(reqEnd);
+      Bool allow = True;
+      /* Grant the hinted address iff every overlapped segment is free
+         or reserved -- never on top of existing mappings. */
+      for (i = iLo; i <= iHi; i++) {
+         if (nsegments[i].kind == SkFree
+             || nsegments[i].kind == SkResvn) {
+            /* ok */
+         } else {
+            allow = False;
+            break;
+         }
+      }
+      if (allow) {
+         /* Acceptable.  Granted. */
+         *ok = True;
+         return reqStart;
+      }
+      /* Not acceptable.  Fall through to the default policy. */
+   }
+
+   /* ------ Implement the Default Policy ------ */
+
+   /* Don't waste time looking for a fixed match if not requested to. */
+   fixed_not_required = req->rkind == MAny;
+
+   /* First-fit, scanning circularly from the area's start point. */
+   i = find_nsegment_idx(startPoint);
+
+   /* Examine holes from index i back round to i-1.  Record the
+      index first fixed hole and the first floating hole which would
+      satisfy the request. */
+   for (j = 0; j < nsegments_used; j++) {
+
+      if (nsegments[i].kind != SkFree) {
+         i++;
+         if (i >= nsegments_used) i = 0;
+         continue;
+      }
+
+      holeStart = nsegments[i].start;
+      holeEnd = nsegments[i].end;
+
+      /* Stay sane .. */
+      aspacem_assert(holeStart <= holeEnd);
+      aspacem_assert(aspacem_minAddr <= holeStart);
+      aspacem_assert(holeEnd <= aspacem_maxAddr);
+
+      /* See if it's any use to us. */
+      holeLen = holeEnd - holeStart + 1;
+
+      if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
+         fixedIdx = i;
+
+      if (floatIdx == -1 && holeLen >= reqLen)
+         floatIdx = i;
+  
+      /* Don't waste time searching once we've found what we wanted. */
+      if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
+         break;
+
+      i++;
+      if (i >= nsegments_used) i = 0;
+   }
+
+   aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
+   if (fixedIdx >= 0) 
+      aspacem_assert(nsegments[fixedIdx].kind == SkFree);
+
+   aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
+   if (floatIdx >= 0) 
+      aspacem_assert(nsegments[floatIdx].kind == SkFree);
+
+   AM_SANITY_CHECK;
+
+   /* Now see if we found anything which can satisfy the request. */
+   switch (req->rkind) {
+      case MFixed:
+         if (fixedIdx >= 0) {
+            *ok = True;
+            return req->start;
+         } else {
+            *ok = False;
+            return 0;
+         }
+         break;
+      case MHint:
+         if (fixedIdx >= 0) {
+            *ok = True;
+            return req->start;
+         }
+         if (floatIdx >= 0) {
+            *ok = True;
+            return nsegments[floatIdx].start;
+         }
+         *ok = False;
+         return 0;
+      case MAny:
+         if (floatIdx >= 0) {
+            *ok = True;
+            return nsegments[floatIdx].start;
+         }
+         *ok = False;
+         return 0;
+      default: 
+         break;
+   }
+
+   /*NOTREACHED*/
+   ML_(am_barf)("getAdvisory: unknown request kind");
+   *ok = False;
+   return 0;
+}
+
+/* Convenience wrapper for VG_(am_get_advisory) for client floating or
+   fixed requests.  If start is zero, a floating request is issued; if
+   nonzero, a fixed request at that address is issued.  Same comments
+   about return values apply. */
+
+Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len, 
+                                          /*OUT*/Bool* ok )
+{
+   MapRequest mreq;
+   /* start == 0 means "anywhere"; otherwise insist on exactly it. */
+   if (start == 0)
+      mreq.rkind = MAny;
+   else
+      mreq.rkind = MFixed;
+   mreq.start = start;
+   mreq.len = len;
+   return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
+}
+
+
+/* Notifies aspacem that the client completed an mmap successfully.
+   The segment array is updated accordingly.  If the returned Bool is
+   True, the caller should immediately discard translations from the
+   specified address range. */
+
+Bool
+VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
+                            Int fd, Off64T offset )
+{
+   HChar buf[VKI_PATH_MAX];
+   ULong dev, ino;
+   UInt mode;
+   NSegment seg;
+   Bool needDiscard;
+
+   aspacem_assert(len > 0);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
+
+   /* Discard is needed if any of the just-trashed range had T. */
+   needDiscard = any_Ts_in_range( a, len );
+
+   init_nsegment( &seg );
+   seg.kind = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
+   seg.start = a;
+   seg.end = a + len - 1;
+   seg.hasR = toBool(prot & VKI_PROT_READ);
+   seg.hasW = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX = toBool(prot & VKI_PROT_EXEC);
+   if (!(flags & VKI_MAP_ANONYMOUS)) {
+      // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
+      seg.offset = offset;
+      /* Best-effort identification of the backing file: on failure
+         the fields keep the zeroed defaults from init_nsegment. */
+      if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
+         seg.dev = dev;
+         seg.ino = ino;
+         seg.mode = mode;
+      }
+      if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+         seg.fnIdx = allocate_segname( buf );
+      }
+   }
+   add_segment( &seg );
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+/* Notifies aspacem that the client completed a shmat successfully.
+   The segment array is updated accordingly.  If the returned Bool is
+   True, the caller should immediately discard translations from the
+   specified address range. */
+
+Bool
+VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
+{
+   NSegment seg;
+   Bool needDiscard;
+
+   aspacem_assert(len > 0);
+   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+   /* Discard is needed if any of the just-trashed range had T. */
+   needDiscard = any_Ts_in_range( a, len );
+
+   /* Record the attachment as a client shared-memory segment; it has
+      no backing file, hence offset 0 and default dev/ino/name. */
+   init_nsegment( &seg );
+   seg.kind = SkShmC;
+   seg.start = a;
+   seg.end = a + len - 1;
+   seg.offset = 0;
+   seg.hasR = toBool(prot & VKI_PROT_READ);
+   seg.hasW = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX = toBool(prot & VKI_PROT_EXEC);
+   add_segment( &seg );
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+/* Notifies aspacem that an mprotect was completed successfully.  The
+   segment array is updated accordingly.  Note, as with
+   VG_(am_notify_munmap), it is not the job of this function to reject
+   stupid mprotects, for example the client doing mprotect of
+   non-client areas.  Such requests should be intercepted earlier, by
+   the syscall wrapper for mprotect.  This function merely records
+   whatever it is told.  If the returned Bool is True, the caller
+   should immediately discard translations from the specified address
+   range. */
+
+Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
+{
+   Int i, iLo, iHi;
+   Bool newR, newW, newX, needDiscard;
+
+   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+   if (len == 0)
+      return False;
+
+   newR = toBool(prot & VKI_PROT_READ);
+   newW = toBool(prot & VKI_PROT_WRITE);
+   newX = toBool(prot & VKI_PROT_EXEC);
+
+   /* Discard is needed if we're dumping X permission */
+   needDiscard = any_Ts_in_range( start, len ) && !newX;
+
+   /* Force segment boundaries at start and start+len, and take the
+      affected index range directly from the splitter: it already
+      returns find_nsegment_idx(start) and
+      find_nsegment_idx(start+len-1), so recomputing them here (as the
+      previous version did) would be redundant. */
+   split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
+
+   for (i = iLo; i <= iHi; i++) {
+      /* Apply the permissions to all relevant segments. */
+      switch (nsegments[i].kind) {
+         case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
+            nsegments[i].hasR = newR;
+            nsegments[i].hasW = newW;
+            nsegments[i].hasX = newX;
+            aspacem_assert(sane_NSegment(&nsegments[i]));
+            break;
+         default:
+            break;
+      }
+   }
+
+   /* Changing permissions could have made previously un-mergable
+      segments mergeable.  Therefore have to re-preen them. */
+   (void)preen_nsegments();
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+
+/* Notifies aspacem that an munmap completed successfully.  The
+   segment array is updated accordingly.  As with
+   VG_(am_notify_munmap), we merely record the given info, and don't
+   check it for sensibleness.  If the returned Bool is True, the
+   caller should immediately discard translations from the specified
+   address range. */
+
+Bool VG_(am_notify_munmap)( Addr start, SizeT len )
+{
+   NSegment seg;
+   Bool needDiscard;
+   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+   if (len == 0)
+      return False;
+
+   /* True iff any translations were made from the unmapped range. */
+   needDiscard = any_Ts_in_range( start, len );
+
+   init_nsegment( &seg );
+   seg.start = start;
+   seg.end = start + len - 1;
+
+   /* The segment becomes unused (free).  Segments from above
+      aspacem_maxAddr were originally SkResvn and so we make them so
+      again.  Note, this isn't really right when the segment straddles
+      the aspacem_maxAddr boundary - then really it should be split in
+      two, the lower part marked as SkFree and the upper part as
+      SkResvn.  Ah well. */
+   if (start > aspacem_maxAddr 
+       && /* check previous comparison is meaningful */
+          aspacem_maxAddr < Addr_MAX)
+      seg.kind = SkResvn;
+   else 
+   /* Ditto for segments from below aspacem_minAddr. */
+   if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
+      seg.kind = SkResvn;
+   else
+      seg.kind = SkFree;
+
+   add_segment( &seg );
+
+   /* Unmapping could create two adjacent free segments, so a preen is
+      needed.  add_segment() will do that, so no need to here. */
+   AM_SANITY_CHECK;
+   return needDiscard;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Handling mappings which do not arise directly from the ---*/
+/*--- simulation of the client. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* --- --- --- map, unmap, protect --- --- --- */
+
+/* Map a file at a fixed address for the client, and update the
+   segment array accordingly. */
+
+SysRes VG_(am_mmap_file_fixed_client)
+     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
+{
+   SysRes sres;
+   NSegment seg;
+   Addr advised;
+   Bool ok;
+   MapRequest req;
+   ULong dev, ino;
+   UInt mode;
+   HChar buf[VKI_PATH_MAX];
+
+   /* Not allowable. */
+   if (length == 0 
+       || !VG_IS_PAGE_ALIGNED(start)
+       || !VG_IS_PAGE_ALIGNED(offset))
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* Ask for an advisory.  If it's negative, fail immediately. */
+   req.rkind = MFixed;
+   req.start = start;
+   req.len = length;
+   advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+   if (!ok || advised != start)
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+   /* We have been advised that the mapping is allowable at the
+      specified address.  So hand it off to the kernel, and propagate
+      any resulting failure immediately. */
+   sres = VG_(am_do_mmap_NO_NOTIFY)( 
+             start, length, prot, 
+             VKI_MAP_FIXED|VKI_MAP_PRIVATE, 
+             fd, offset 
+          );
+   if (sres.isError)
+      return sres;
+
+   if (sres.res != start) {
+      /* I don't think this can happen.  It means the kernel made a
+         fixed map succeed but not at the requested location.  Try to
+         repair the damage, then return saying the mapping failed. */
+      (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+   }
+
+   /* Ok, the mapping succeeded.  Now notify the interval map. */
+   init_nsegment( &seg );
+   seg.kind = SkFileC;
+   seg.start = start;
+   seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+   seg.offset = offset;
+   seg.hasR = toBool(prot & VKI_PROT_READ);
+   seg.hasW = toBool(prot & VKI_PROT_WRITE);
+   seg.hasX = toBool(prot & VKI_PROT_EXEC);
+   /* Best-effort identification of the backing file; on failure the
+      segment keeps the zeroed defaults from init_nsegment. */
+   if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
+      seg.dev = dev;
+      seg.ino = ino;
+      seg.mode = mode;
+   }
+   if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+      seg.fnIdx = allocate_segname( buf );
+   }
+   add_segment( &seg );
+
+   AM_SANITY_CHECK;
+   return sres;
+}
+
+
+/* Map anonymously at a fixed address for the client, and update
+ the segment array accordingly. */
+
+SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+
+ /* Not allowable: zero length or non-page-aligned start. */
+ if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately.
+ An MFixed request must be advised at exactly 'start'. */
+ req.rkind = MFixed;
+ req.start = start;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+ if (!ok || advised != start)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ start, length, prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != start) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map: an
+ anonymous client (SkAnonC) segment covering the rounded-up range. */
+ init_nsegment( &seg );
+ seg.kind = SkAnonC;
+ seg.start = start;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+
+/* Map anonymously at an unconstrained address for the client, and
+ update the segment array accordingly. */
+
+SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately.
+ MAny leaves the placement entirely to the advisory logic. */
+ req.rkind = MAny;
+ req.start = 0;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ advised address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. MAP_FIXED is used because
+ the advisory already chose a definite free address for us. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ advised, length, prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind = SkAnonC;
+ seg.start = advised;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+
+/* Similarly, acquire new address space for the client but with
+ considerable restrictions on what can be done with it: (1) the
+ actual protections may exceed those stated in 'prot', (2) the
+ area's protections cannot be later changed using any form of
+ mprotect, and (3) the area cannot be freed using any form of
+ munmap. On Linux this behaves the same as
+ VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
+ by using sbrk, so as to make use of large pages on AIX. */
+
+SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
+{
+ /* On Linux there is no sbrk-based fast path; simply defer to the
+ ordinary anonymous floating allocator. */
+ SysRes sres = VG_(am_mmap_anon_float_client)( length, prot );
+ return sres;
+}
+
+
+/* Map anonymously at an unconstrained address for V, and update the
+ segment array accordingly. This is fundamentally how V allocates
+ itself more address space when needed. */
+
+SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+
+ /* Not allowable. */
+ if (length == 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately.
+ Note: False/*valgrind*/ — this space comes out of V's arena,
+ not the client's. */
+ req.rkind = MAny;
+ req.start = 0;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ advised, length,
+ VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map.
+ V-owned anonymous space is always RWX (see the prot above). */
+ init_nsegment( &seg );
+ seg.kind = SkAnonV;
+ seg.start = advised;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.hasR = True;
+ seg.hasW = True;
+ seg.hasX = True;
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+/* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
+
+void* VG_(am_shadow_alloc)(SizeT size)
+{
+ /* Allocate V-owned anonymous space; NULL on failure. */
+ SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
+ if (sres.isError)
+ return NULL;
+ return (void*)sres.res;
+}
+
+/* Same comments apply as per VG_(am_sbrk_anon_float_client). On
+ Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
+
+SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
+{
+ /* On Linux this is identical to the mmap-based allocator. */
+ SysRes sres = VG_(am_mmap_anon_float_valgrind)( cszB );
+ return sres;
+}
+
+
+/* Map a file at an unconstrained address for V, and update the
+ segment array accordingly. This is used by V for transiently
+ mapping in object files to read their debug info. */
+
+SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
+ Int fd, Off64T offset )
+{
+ SysRes sres;
+ NSegment seg;
+ Addr advised;
+ Bool ok;
+ MapRequest req;
+ ULong dev, ino;
+ UInt mode;
+ HChar buf[VKI_PATH_MAX];
+
+ /* Not allowable: zero length or non-page-aligned file offset. */
+ if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* Ask for an advisory. If it's negative, fail immediately.
+ FIX: this mapping is made on behalf of Valgrind itself (the
+ segment is entered as SkFileV below), so the advisory must be
+ requested for the Valgrind arena, not the client arena.
+ Previously this passed True/*client*/, which could place
+ V-owned file mappings in client address space. */
+ req.rkind = MAny;
+ req.start = 0;
+ req.len = length;
+ advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
+ if (!ok)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ /* We have been advised that the mapping is allowable at the
+ specified address. So hand it off to the kernel, and propagate
+ any resulting failure immediately. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ advised, length, prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE,
+ fd, offset
+ );
+ if (sres.isError)
+ return sres;
+
+ if (sres.res != advised) {
+ /* I don't think this can happen. It means the kernel made a
+ fixed map succeed but not at the requested location. Try to
+ repair the damage, then return saying the mapping failed. */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, length );
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+
+ /* Ok, the mapping succeeded. Now notify the interval map. */
+ init_nsegment( &seg );
+ seg.kind = SkFileV;
+ seg.start = sres.res;
+ seg.end = seg.start + VG_PGROUNDUP(length) - 1;
+ seg.offset = offset;
+ seg.hasR = toBool(prot & VKI_PROT_READ);
+ seg.hasW = toBool(prot & VKI_PROT_WRITE);
+ seg.hasX = toBool(prot & VKI_PROT_EXEC);
+ /* dev/ino/mode and the filename are best-effort decorations;
+ failure to obtain them is not an error. */
+ if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
+ seg.dev = dev;
+ seg.ino = ino;
+ seg.mode = mode;
+ }
+ if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
+ seg.fnIdx = allocate_segname( buf );
+ }
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return sres;
+}
+
+
+/* --- --- munmap helper --- --- */
+
+/* Shared worker for VG_(am_munmap_client) and VG_(am_munmap_valgrind).
+ Unmaps [start, start+len) after validating it against either the
+ client's or Valgrind's ownership rules. On success, *need_discard
+ says whether any translations were made from the range and hence
+ must be discarded by the caller. */
+static
+SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
+ Addr start, SizeT len, Bool forClient )
+{
+ Bool d;
+ SysRes sres;
+
+ if (!VG_IS_PAGE_ALIGNED(start))
+ goto eINVAL;
+
+ /* Zero-length unmap is a trivially successful no-op. */
+ if (len == 0) {
+ *need_discard = False;
+ return VG_(mk_SysRes_Success)( 0 );
+ }
+
+ /* Reject address-space wraparound. */
+ if (start + len < len)
+ goto eINVAL;
+
+ len = VG_PGROUNDUP(len);
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(len));
+
+ if (forClient) {
+ if (!VG_(am_is_valid_for_client_or_free_or_resvn)
+ ( start, len, VKI_PROT_NONE ))
+ goto eINVAL;
+ } else {
+ if (!is_valid_for_valgrind( start, len ))
+ goto eINVAL;
+ }
+
+ /* Record whether translations exist in the range *before* the
+ segment array is updated by the notify below. */
+ d = any_Ts_in_range( start, len );
+
+ sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
+ if (sres.isError)
+ return sres;
+
+ VG_(am_notify_munmap)( start, len );
+ AM_SANITY_CHECK;
+ *need_discard = d;
+ return sres;
+
+ eINVAL:
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+}
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for the client.
+ If *need_discard is True after a successful return, the caller
+ should immediately discard translations from the specified address
+ range. */
+
+SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
+ Addr start, SizeT len )
+{
+ /* Defer to the common worker, flagging the range as client-owned. */
+ SysRes r = am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
+ return r;
+}
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for valgrind. */
+
+SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
+{
+ Bool discard_needed = False;
+ SysRes res = am_munmap_both_wrk( &discard_needed,
+ start, len, False/*valgrind*/ );
+ /* If this assertion fails, it means we allowed translations to be
+ made from a V-owned section. Which shouldn't happen. */
+ if (!res.isError)
+ aspacem_assert(!discard_needed);
+ return res;
+}
+
+/* Let (start,len) denote an area within a single Valgrind-owned
+ segment (anon or file). Change the ownership of [start, start+len)
+ to the client instead. Fails if (start,len) does not denote a
+ suitable segment. */
+
+Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
+{
+ Int i, iLo, iHi;
+
+ /* Empty range: trivially OK. */
+ if (len == 0)
+ return True;
+ /* Reject wraparound and misalignment. */
+ if (start + len < start)
+ return False;
+ if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
+ return False;
+
+ /* The range must lie entirely within one V-owned (file or anon)
+ segment. */
+ i = find_nsegment_idx(start);
+ if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
+ return False;
+ if (start+len-1 > nsegments[i].end)
+ return False;
+
+ aspacem_assert(start >= nsegments[i].start);
+ aspacem_assert(start+len-1 <= nsegments[i].end);
+
+ /* This scheme is like how mprotect works: split the to-be-changed
+ range into its own segment(s), then mess with them (it). There
+ should be only one. */
+ split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
+ aspacem_assert(iLo == iHi);
+ switch (nsegments[iLo].kind) {
+ case SkFileV: nsegments[iLo].kind = SkFileC; break;
+ case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
+ default: aspacem_assert(0); /* can't happen - guarded above */
+ }
+
+ /* Merge any now-identical adjacent segments back together. */
+ preen_nsegments();
+ return True;
+}
+
+/* 'seg' must be NULL or have been obtained from
+ VG_(am_find_nsegment), and still valid. If non-NULL, and if it
+ denotes a SkAnonC (anonymous client mapping) area, set the .isCH
+ (is-client-heap) flag for that area. Otherwise do nothing.
+ (Bizarre interface so that the same code works for both Linux and
+ AIX and does not impose inefficiencies on the Linux version.) */
+void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
+{
+ Int idx = segAddr_to_index( seg );
+ aspacem_assert(idx >= 0 && idx < nsegments_used);
+ if (nsegments[idx].kind != SkAnonC) {
+ /* Not an anonymous client area: it must not be flagged as
+ client heap either. */
+ aspacem_assert(nsegments[idx].isCH == False);
+ return;
+ }
+ nsegments[idx].isCH = True;
+}
+
+/* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
+ segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
+ segment. */
+void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
+{
+ Int idx = segAddr_to_index( seg );
+ aspacem_assert(idx >= 0 && idx < nsegments_used);
+ switch (nsegments[idx].kind) {
+ case SkAnonC: case SkFileC:
+ /* Client mapping: note that cached translations exist. */
+ nsegments[idx].hasT = True;
+ break;
+ default:
+ break;
+ }
+}
+
+
+/* --- --- --- reservations --- --- --- */
+
+/* Create a reservation from START .. START+LENGTH-1, with the given
+ ShrinkMode. When checking whether the reservation can be created,
+ also ensure that at least abs(EXTRA) extra free bytes will remain
+ above (> 0) or below (< 0) the reservation.
+
+ The reservation will only be created if it, plus the extra-zone,
+ falls entirely within a single free segment. The returned Bool
+ indicates whether the creation succeeded. */
+
+Bool VG_(am_create_reservation) ( Addr start, SizeT length,
+ ShrinkMode smode, SSizeT extra )
+{
+ Int startI, endI;
+ NSegment seg;
+
+ /* start and end, not taking into account the extra space. */
+ Addr start1 = start;
+ Addr end1 = start + length - 1;
+
+ /* start and end, taking into account the extra space. */
+ Addr start2 = start1;
+ Addr end2 = end1;
+
+ if (extra < 0) start2 += extra; // this moves it down :-)
+ if (extra > 0) end2 += extra;
+
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
+ aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
+
+ startI = find_nsegment_idx( start2 );
+ endI = find_nsegment_idx( end2 );
+
+ /* If the start and end points don't fall within the same (free)
+ segment, we're hosed. This does rely on the assumption that all
+ mergeable adjacent segments can be merged, but add_segment()
+ should ensure that. */
+ if (startI != endI)
+ return False;
+
+ if (nsegments[startI].kind != SkFree)
+ return False;
+
+ /* Looks good - make the reservation. */
+ aspacem_assert(nsegments[startI].start <= start2);
+ aspacem_assert(end2 <= nsegments[startI].end);
+
+ init_nsegment( &seg );
+ seg.kind = SkResvn;
+ seg.start = start1; /* NB: extra space is not included in the
+ reservation. */
+ seg.end = end1;
+ seg.smode = smode;
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/* Let SEG be an anonymous client mapping. This fn extends the
+ mapping by DELTA bytes, taking the space from a reservation section
+ which must be adjacent. If DELTA is positive, the segment is
+ extended forwards in the address space, and the reservation must be
+ the next one along. If DELTA is negative, the segment is extended
+ backwards in the address space and the reservation must be the
+ previous one. DELTA must be page aligned. abs(DELTA) must not
+ exceed the size of the reservation segment minus one page, that is,
+ the reservation segment after the operation must be at least one
+ page long. */
+
+Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
+ SSizeT delta )
+{
+ Int segA, segR;
+ UInt prot;
+ SysRes sres;
+
+ /* Find the segment array index for SEG. If the assertion fails it
+ probably means you passed in a bogus SEG. */
+ segA = segAddr_to_index( seg );
+ aspacem_assert(segA >= 0 && segA < nsegments_used);
+
+ if (nsegments[segA].kind != SkAnonC)
+ return False;
+
+ if (delta == 0)
+ return True;
+
+ /* The new space inherits the anon segment's current permissions. */
+ prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
+ | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
+ | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
+
+ aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
+
+ if (delta > 0) {
+
+ /* Extending the segment forwards. The next segment must be an
+ SmLower reservation abutting SEG, and must stay at least one
+ page long after the transfer. */
+ segR = segA+1;
+ if (segR >= nsegments_used
+ || nsegments[segR].kind != SkResvn
+ || nsegments[segR].smode != SmLower
+ || nsegments[segR].start != nsegments[segA].end + 1
+ || delta + VKI_PAGE_SIZE
+ > (nsegments[segR].end - nsegments[segR].start + 1))
+ return False;
+
+ /* Extend the kernel's mapping. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ nsegments[segR].start, delta,
+ prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return False; /* kernel bug if this happens? */
+ if (sres.res != nsegments[segR].start) {
+ /* kernel bug if this happens? */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, delta );
+ return False;
+ }
+
+ /* Ok, success with the kernel. Update our structures. */
+ nsegments[segR].start += delta;
+ nsegments[segA].end += delta;
+ aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
+
+ } else {
+
+ /* Extending the segment backwards. Mirror image of the above:
+ the previous segment must be an SmUpper reservation abutting
+ SEG, and must stay at least one page long. */
+ delta = -delta;
+ aspacem_assert(delta > 0);
+
+ segR = segA-1;
+ if (segR < 0
+ || nsegments[segR].kind != SkResvn
+ || nsegments[segR].smode != SmUpper
+ || nsegments[segR].end + 1 != nsegments[segA].start
+ || delta + VKI_PAGE_SIZE
+ > (nsegments[segR].end - nsegments[segR].start + 1))
+ return False;
+
+ /* Extend the kernel's mapping. */
+ sres = VG_(am_do_mmap_NO_NOTIFY)(
+ nsegments[segA].start-delta, delta,
+ prot,
+ VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
+ 0, 0
+ );
+ if (sres.isError)
+ return False; /* kernel bug if this happens? */
+ if (sres.res != nsegments[segA].start-delta) {
+ /* kernel bug if this happens? */
+ (void)ML_(am_do_munmap_NO_NOTIFY)( sres.res, delta );
+ return False;
+ }
+
+ /* Ok, success with the kernel. Update our structures. */
+ nsegments[segR].end -= delta;
+ nsegments[segA].start -= delta;
+ aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
+
+ }
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/* --- --- --- resizing/move a mapping --- --- --- */
+
+/* Let SEG be a client mapping (anonymous or file). This fn extends
+ the mapping forwards only by DELTA bytes, and trashes whatever was
+ in the new area. Fails if SEG is not a single client mapping or if
+ the new area is not accessible to the client. Fails if DELTA is
+ not page aligned. *seg is invalid after a successful return. If
+ *need_discard is True after a successful return, the caller should
+ immediately discard translations from the new area. */
+
+Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
+ NSegment* seg, SizeT delta )
+{
+ Addr xStart;
+ SysRes sres;
+ /* Copy the descriptor now: add_segment() below may invalidate
+ *seg, as documented in the function's contract. */
+ NSegment seg_copy = *seg;
+ SizeT seg_old_len = seg->end + 1 - seg->start;
+
+ if (0)
+ VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
+
+ if (seg->kind != SkFileC && seg->kind != SkAnonC)
+ return False;
+
+ if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
+ return False;
+
+ /* xStart is the first address of the proposed new area; reject
+ wraparound. */
+ xStart = seg->end+1;
+ if (xStart + delta < delta)
+ return False;
+
+ if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
+ VKI_PROT_NONE ))
+ return False;
+
+ AM_SANITY_CHECK;
+ sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
+ seg_old_len,
+ seg_old_len + delta );
+ if (sres.isError) {
+ AM_SANITY_CHECK;
+ return False;
+ } else {
+ /* the area must not have moved */
+ aspacem_assert(sres.res == seg->start);
+ }
+
+ *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
+
+ seg_copy.end += delta;
+ add_segment( &seg_copy );
+
+ if (0)
+ VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/* Remap the old address range to the new address range. Fails if any
+ parameter is not page aligned, if the either size is zero, if any
+ wraparound is implied, if the old address range does not fall
+ entirely within a single segment, if the new address range overlaps
+ with the old one, or if the old address range is not a valid client
+ mapping. If *need_discard is True after a successful return, the
+ caller should immediately discard translations from both specified
+ address ranges. */
+
+Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
+ Addr old_addr, SizeT old_len,
+ Addr new_addr, SizeT new_len )
+{
+ Int iLo, iHi;
+ SysRes sres;
+ NSegment seg;
+
+ if (old_len == 0 || new_len == 0)
+ return False;
+
+ if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
+ || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
+ return False;
+
+ /* Reject wraparound in either range. */
+ if (old_addr + old_len < old_addr
+ || new_addr + new_len < new_addr)
+ return False;
+
+ /* The two ranges must be disjoint. */
+ if (old_addr + old_len - 1 < new_addr
+ || new_addr + new_len - 1 < old_addr) {
+ /* no overlap */
+ } else
+ return False;
+
+ /* The old range must lie entirely within a single client mapping. */
+ iLo = find_nsegment_idx( old_addr );
+ iHi = find_nsegment_idx( old_addr + old_len - 1 );
+ if (iLo != iHi)
+ return False;
+
+ if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
+ return False;
+
+ sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
+ ( old_addr, old_len, new_addr, new_len );
+ if (sres.isError) {
+ AM_SANITY_CHECK;
+ return False;
+ } else {
+ aspacem_assert(sres.res == new_addr);
+ }
+
+ *need_discard = any_Ts_in_range( old_addr, old_len )
+ || any_Ts_in_range( new_addr, new_len );
+
+ seg = nsegments[iLo];
+
+ /* Mark the new area based on the old seg. For a file mapping the
+ file offset must be adjusted by how far into the old segment the
+ moved range started. */
+ if (seg.kind == SkFileC) {
+ seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
+ } else {
+ aspacem_assert(seg.kind == SkAnonC);
+ aspacem_assert(seg.offset == 0);
+ }
+ seg.start = new_addr;
+ seg.end = new_addr + new_len - 1;
+ add_segment( &seg );
+
+ /* Create a free hole in the old location. */
+ init_nsegment( &seg );
+ seg.start = old_addr;
+ seg.end = old_addr + old_len - 1;
+ /* See comments in VG_(am_notify_munmap) about this SkResvn vs
+ SkFree thing. */
+ if (old_addr > aspacem_maxAddr
+ && /* check previous comparison is meaningful */
+ aspacem_maxAddr < Addr_MAX)
+ seg.kind = SkResvn;
+ else
+ seg.kind = SkFree;
+
+ add_segment( &seg );
+
+ AM_SANITY_CHECK;
+ return True;
+}
+
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
+/*--- Almost completely independent of the stuff above. The ---*/
+/*--- only function it 'exports' to the code above this comment ---*/
+/*--- is parse_procselfmaps. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
+/* Size of a smallish table used to read /proc/self/map entries. */
+#define M_PROCMAP_BUF 100000
+
+/* static ... to keep it out of the stack frame. Filled by
+ read_procselfmaps_into_buf() and then consumed read-only by
+ parse_procselfmaps(). */
+static Char procmap_buf[M_PROCMAP_BUF];
+
+/* Records length of /proc/self/maps read into procmap_buf. */
+static Int buf_n_tot;
+
+/* Helper fns. */
+
+/* Value of hex digit 'c' (0..15), or -1 if 'c' is not a hex digit. */
+static Int hexdigit ( Char c )
+{
+ if ('0' <= c && c <= '9')
+ return (Int)(c - '0');
+ if ('a' <= c && c <= 'f')
+ return (Int)(c - 'a') + 10;
+ if ('A' <= c && c <= 'F')
+ return (Int)(c - 'A') + 10;
+ return -1;
+}
+
+/* Value of decimal digit 'c' (0..9), or -1 if 'c' is not a digit. */
+static Int decdigit ( Char c )
+{
+ return ('0' <= c && c <= '9') ? (Int)(c - '0') : -1;
+}
+
+/* Copy the character at 'buf' into *ch. Returns the number of
+ characters consumed: 1 normally, 0 at end-of-string. */
+static Int readchar ( const Char* buf, Char* ch )
+{
+ if (*buf == 0)
+ return 0;
+ *ch = *buf;
+ return 1;
+}
+
+/* Read a word-sized hex number at 'buf' into *val. Returns the
+ number of characters consumed (0 if no hex digit is present). */
+static Int readhex ( const Char* buf, UWord* val )
+{
+ Int n;
+ *val = 0;
+ for (n = 0; hexdigit(buf[n]) >= 0; n++)
+ *val = (*val << 4) + hexdigit(buf[n]);
+ return n;
+}
+
+/* Read a potentially 64-bit hex number at 'buf' into *val. Returns
+ the number of characters consumed (0 if no hex digit is present). */
+static Int readhex64 ( const Char* buf, ULong* val )
+{
+ Int n;
+ *val = 0;
+ for (n = 0; hexdigit(buf[n]) >= 0; n++)
+ *val = (*val << 4) + hexdigit(buf[n]);
+ return n;
+}
+
+/* Read a potentially 64-bit decimal number at 'buf' into *val.
+ Returns the number of characters consumed (0 if no digit present).
+ BUG FIX: the loop condition previously used hexdigit(*buf), so the
+ characters 'a'..'f'/'A'..'F' were consumed as if they were digits;
+ decdigit() then returned -1 for them, corrupting *val (and the
+ consumed-count). Decimal fields (e.g. the inode number in
+ /proc/self/maps) must only accept '0'..'9'. */
+static Int readdec64 ( const Char* buf, ULong* val )
+{
+ Int n = 0;
+ *val = 0;
+ while (decdigit(*buf) >= 0) {
+ *val = (*val * 10) + decdigit(*buf);
+ n++; buf++;
+ }
+ return n;
+}
+
+
+/* Get the contents of /proc/self/maps into a static buffer. If
+ there's a syntax error, it won't fit, or other failure, just
+ abort. */
+
+static void read_procselfmaps_into_buf ( void )
+{
+ Int n_chunk;
+ SysRes fd;
+
+ /* Read the initial memory mapping from the /proc filesystem. */
+ fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
+ if (fd.isError)
+ ML_(am_barf)("can't open /proc/self/maps");
+
+ /* Accumulate the file in chunks; read() may return short counts. */
+ buf_n_tot = 0;
+ do {
+ n_chunk = ML_(am_read)( fd.res, &procmap_buf[buf_n_tot],
+ M_PROCMAP_BUF - buf_n_tot );
+ if (n_chunk >= 0)
+ buf_n_tot += n_chunk;
+ } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
+
+ ML_(am_close)(fd.res);
+
+ /* Nearly-full means the buffer may have been too small: abort
+ rather than silently parse a truncated map. */
+ if (buf_n_tot >= M_PROCMAP_BUF-5)
+ ML_(am_barf_toolow)("M_PROCMAP_BUF");
+ if (buf_n_tot == 0)
+ ML_(am_barf)("I/O error on /proc/self/maps");
+
+ /* NUL-terminate so the parser can treat it as a C string. */
+ procmap_buf[buf_n_tot] = 0;
+}
+
+/* Parse /proc/self/maps. For each map entry, call
+ record_mapping, passing it, in this order:
+
+ start address in memory
+ length
+ page protections (using the VKI_PROT_* flags)
+ mapped file device and inode
+ offset in file, or zero if no file
+ filename, zero terminated, or NULL if no file
+
+ So the sig of the called fn might be
+
+ void (*record_mapping)( Addr start, SizeT size, UInt prot,
+ UInt dev, UInt info,
+ ULong foffset, UChar* filename )
+
+ Note that the supplied filename is transiently stored; record_mapping
+ should make a copy if it wants to keep it.
+
+ Nb: it is important that this function does not alter the contents of
+ procmap_buf!
+*/
+static void parse_procselfmaps (
+ void (*record_mapping)( Addr addr, SizeT len, UInt prot,
+ ULong dev, ULong ino, Off64T offset,
+ const UChar* filename ),
+ void (*record_gap)( Addr addr, SizeT len )
+ )
+{
+ Int i, j, i_eol;
+ Addr start, endPlusOne, gapStart;
+ UChar* filename;
+ UChar rr, ww, xx, pp, ch, tmp;
+ UInt prot;
+ UWord maj, min;
+ ULong foffset, dev, ino;
+
+ foffset = ino = 0; /* keep gcc-4.1.0 happy */
+
+ read_procselfmaps_into_buf();
+
+ aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
+
+ if (0)
+ VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
+
+ /* Ok, it's safely aboard. Parse the entries. */
+ i = 0;
+ /* gapStart tracks the first address not yet covered by a parsed
+ entry, so holes between entries can be reported via record_gap. */
+ gapStart = Addr_MIN;
+ while (True) {
+ if (i >= buf_n_tot) break;
+
+ /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
+ j = readhex(&procmap_buf[i], &start);
+ if (j > 0) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == '-') i += j; else goto syntaxerror;
+ j = readhex(&procmap_buf[i], &endPlusOne);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &rr);
+ if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &ww);
+ if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &xx);
+ if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
+ /* This field is the shared/private flag */
+ j = readchar(&procmap_buf[i], &pp);
+ if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
+ i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readhex64(&procmap_buf[i], &foffset);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readhex(&procmap_buf[i], &maj);
+ if (j > 0) i += j; else goto syntaxerror;
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ':') i += j; else goto syntaxerror;
+ j = readhex(&procmap_buf[i], &min);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ j = readchar(&procmap_buf[i], &ch);
+ if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
+
+ j = readdec64(&procmap_buf[i], &ino);
+ if (j > 0) i += j; else goto syntaxerror;
+
+ goto read_line_ok;
+
+ syntaxerror:
+ /* Fatal: dump the last 50 characters seen for diagnosis, then
+ abort. */
+ VG_(debugLog)(0, "Valgrind:",
+ "FATAL: syntax error reading /proc/self/maps\n");
+ { Int k, m;
+ HChar buf50[51];
+ m = 0;
+ buf50[m] = 0;
+ k = i - 50;
+ if (k < 0) k = 0;
+ for (; k <= i; k++) {
+ buf50[m] = procmap_buf[k];
+ buf50[m+1] = 0;
+ if (m < 50-1) m++;
+ }
+ VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
+ }
+ ML_(am_exit)(1);
+
+ read_line_ok:
+
+ /* Try and find the name of the file mapped to this segment, if
+ it exists. Note that filenames can contain spaces. */
+
+ // Move i to the next non-space char, which should be either a '/' or
+ // a newline.
+ while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
+
+ // Move i_eol to the end of the line.
+ i_eol = i;
+ while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
+
+ // If there's a filename...
+ if (i < i_eol-1 && procmap_buf[i] == '/') {
+ /* Minor hack: put a '\0' at the filename end for the call to
+ 'record_mapping', then restore the old char with 'tmp'. */
+ filename = &procmap_buf[i];
+ tmp = filename[i_eol - i];
+ filename[i_eol - i] = '\0';
+ } else {
+ tmp = 0;
+ filename = NULL;
+ foffset = 0;
+ }
+
+ prot = 0;
+ if (rr == 'r') prot |= VKI_PROT_READ;
+ if (ww == 'w') prot |= VKI_PROT_WRITE;
+ if (xx == 'x') prot |= VKI_PROT_EXEC;
+
+ /* Linux has two ways to encode a device number when it
+ is exposed to user space (via fstat etc). The old way
+ is the traditional unix scheme that produces a 16 bit
+ device number with the top 8 being the major number and
+ the bottom 8 the minor number.
+
+ The new scheme allows for a 12 bit major number and
+ a 20 bit minor number by using a 32 bit device number
+ and putting the top 12 bits of the minor number into
+ the top 12 bits of the device number thus leaving an
+ extra 4 bits for the major number.
+
+ If the minor and major number are both single byte
+ values then both schemes give the same result so we
+ use the new scheme here in case either number is
+ outside the 0-255 range and then use fstat64 when
+ available (or fstat on 64 bit systems) so that we
+ should always have a new style device number and
+ everything should match. */
+ dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
+
+ if (record_gap && gapStart < start)
+ (*record_gap) ( gapStart, start-gapStart );
+
+ if (record_mapping && start < endPlusOne)
+ (*record_mapping) ( start, endPlusOne-start,
+ prot, dev, ino,
+ foffset, filename );
+
+ /* Undo the transient NUL-termination hack so procmap_buf is
+ unchanged for the next iteration / caller. */
+ if ('\0' != tmp) {
+ filename[i_eol - i] = tmp;
+ }
+
+ i = i_eol + 1;
+ gapStart = endPlusOne;
+ }
+
+ /* Report the final gap from the last mapping to the top of the
+ address space. */
+ if (record_gap && gapStart < Addr_MAX)
+ (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_aspacemgr/priv_aspacemgr.h b/coregrind/m_aspacemgr/priv_aspacemgr.h
new file mode 100644
index 0000000..3ce2229
--- /dev/null
+++ b/coregrind/m_aspacemgr/priv_aspacemgr.h
@@ -0,0 +1,130 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Module-local header file for m_aspacemgr.                    ---*/
+/*---                                             priv_aspacemgr.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2006-2009 OpenWorks LLP
+      info@open-works.co.uk
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PRIV_ASPACEMGR_H
+#define __PRIV_ASPACEMGR_H
+
+/* One of the important design goals of the address space manager is
+   to minimise dependence on other modules.  Hence the following
+   minimal set of imports. */
+
+#include "pub_core_basics.h"     // types
+#include "pub_core_vkiscnums.h"  // system call numbers
+#include "pub_core_vki.h"        // VKI_PAGE_SIZE, VKI_MREMAP_MAYMOVE,
+                                 // VKI_MREMAP_FIXED, vki_stat64
+
+#include "pub_core_debuglog.h"   // VG_(debugLog)
+
+#include "pub_core_libcbase.h"   // VG_(strlen), VG_(strcmp)
+                                 // VG_IS_PAGE_ALIGNED
+                                 // VG_PGROUNDDN, VG_PGROUNDUP
+
+#include "pub_core_syscall.h"    // VG_(do_syscallN)
+                                 // VG_(mk_SysRes_Error)
+                                 // VG_(mk_SysRes_Success)
+
+#include "pub_core_options.h"    // VG_(clo_sanity_level)
+
+#include "pub_core_aspacemgr.h"  // self
+
+
+/* --------------- Implemented in aspacemgr-common.c ---------------*/
+
+/* Simple assert-like, file I/O and syscall facilities, which avoid
+   dependence on m_libcassert, and hence on the entire module graph.
+   This is important since most of the system itself depends on
+   aspacem, so we have to do this to avoid a circular dependency. */
+
+__attribute__ ((noreturn))
+extern void   ML_(am_exit) ( Int status );
+extern void   ML_(am_barf) ( HChar* what );
+extern void   ML_(am_barf_toolow) ( HChar* what );
+
+__attribute__ ((noreturn))
+extern void   ML_(am_assert_fail) ( const HChar* expr,
+                                    const Char* file,
+                                    Int line,
+                                    const Char* fn );
+
+#define aspacem_assert(expr)                              \
+  ((void) ((expr) ? 0 :                                   \
+           (ML_(am_assert_fail)(#expr,                    \
+                                __FILE__, __LINE__,       \
+                                __PRETTY_FUNCTION__))))
+
+/* Return the current process ID, without depending on m_libcproc. */
+extern Int ML_(am_getpid)( void );
+
+/* A simple, self-contained sprintf implementation. */
+extern UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... );
+
+/* mmap et al wrappers */
+/* wrapper for munmap */
+extern SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length);
+
+/* wrapper for the ghastly 'mremap' syscall */
+extern SysRes ML_(am_do_extend_mapping_NO_NOTIFY)( 
+                   Addr  old_addr,
+                   SizeT old_len,
+                   SizeT new_len 
+              );
+/* wrapper for mremap when moving a mapping to a new, non-overlapping
+   address range */
+extern SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
+                   Addr old_addr, Addr old_len, 
+                   Addr new_addr, Addr new_len 
+              );
+
+/* There is also VG_(do_mmap_NO_NOTIFY), but that's not declared
+   here (obviously). */
+
+extern SysRes ML_(am_open)  ( const Char* pathname, Int flags, Int mode );
+extern void   ML_(am_close) ( Int fd );
+extern Int    ML_(am_read)  ( Int fd, void* buf, Int count);
+extern Int    ML_(am_readlink) ( HChar* path, HChar* buf, UInt bufsiz );
+
+/* Get the dev, inode and mode info for a file descriptor, if
+   possible.  Returns True on success. */
+extern
+Bool ML_(am_get_fd_d_i_m)( Int fd, 
+                           /*OUT*/ULong* dev, 
+                           /*OUT*/ULong* ino, /*OUT*/UInt* mode );
+
+/* ------ Implemented separately in aspacemgr-{linux,aix5}.c ------ */
+
+/* Do a sanity check (/proc/self/maps sync check) */
+extern void ML_(am_do_sanity_check)( void );
+
+
+#endif   // __PRIV_ASPACEMGR_H
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/