aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authornjn <njn@a5019735-40e9-0310-863c-91ae7b9d1cf9>2006-03-31 11:57:59 +0000
committernjn <njn@a5019735-40e9-0310-863c-91ae7b9d1cf9>2006-03-31 11:57:59 +0000
commitdbf7ca71128c6787ba8a99cbd03c3773ff572d96 (patch)
tree449950b3d0d4b2d1513d805c5d91bad0e538de44
parenta30bec858a229a4c5d2cd7797e970fa99c2043ad (diff)
downloadvalgrind-dbf7ca71128c6787ba8a99cbd03c3773ff572d96.tar.gz
Terminology change: previously in Memcheck we had the four states:
noaccess, undefined, defined, partdefined

As a result, the following names:

  make_writable, make_readable,
  check_writable, check_readable, check_defined

have become:

  make_mem_undefined, make_mem_defined,
  check_mem_is_addressable, check_mem_is_defined, check_value_is_defined

(and likewise for the upper-case versions for client request macros).
The old MAKE_* and CHECK_* macros still work for backwards compatibility.

This is much better, because the old names were subtly misleading.  For
example:

- "readable" really meant "readable and writable".

- "writable" really meant "writable and maybe readable, depending on how
  the read value is used".

- "check_writable" really meant "check writable or readable"

The new names avoid these problems.

The recently-added macro which was called MAKE_DEFINED is now
MAKE_MEM_DEFINED_IF_ADDRESSABLE.

I also corrected the spelling of "addressable" in numerous places in
memcheck.h.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@5802 a5019735-40e9-0310-863c-91ae7b9d1cf9
-rw-r--r--NEWS24
-rw-r--r--auxprogs/libmpiwrap.c176
-rw-r--r--coregrind/m_mallocfree.c6
-rw-r--r--include/valgrind.h2
-rw-r--r--memcheck/docs/mc-manual.xml22
-rw-r--r--memcheck/mc_include.h8
-rw-r--r--memcheck/mc_main.c995
-rw-r--r--memcheck/mc_malloc_wrappers.c20
-rw-r--r--memcheck/memcheck.h91
-rw-r--r--memcheck/tests/addressable.c22
-rw-r--r--memcheck/tests/clientperm.c8
-rw-r--r--memcheck/tests/custom_alloc.c4
-rw-r--r--memcheck/tests/mempool.c4
-rw-r--r--memcheck/tests/pointer-trace.c2
-rw-r--r--memcheck/tests/post-syscall.c4
-rw-r--r--memcheck/tests/sh-mem.c10
-rw-r--r--memcheck/tests/x86/scalar.c2
17 files changed, 724 insertions, 676 deletions
diff --git a/NEWS b/NEWS
index 40a086a6e..585ee1613 100644
--- a/NEWS
+++ b/NEWS
@@ -28,6 +28,30 @@ In detail:
- XXX: others...
+Other user-visible changes:
+
+- There are some changes to Memcheck's client requests. Some of them have
+ changed names:
+
+ MAKE_NOACCESS --> MAKE_MEM_NOACCESS
+ MAKE_WRITABLE --> MAKE_MEM_UNDEFINED
+ MAKE_READABLE --> MAKE_MEM_DEFINED
+
+ CHECK_WRITABLE --> CHECK_MEM_IS_ADDRESSABLE
+ CHECK_READABLE --> CHECK_MEM_IS_DEFINED
+ CHECK_DEFINED --> CHECK_VALUE_IS_DEFINED
+
+ The reason for the change is that the old names are subtly misleading.
+ The old names will still work, but they are deprecated and may be removed
+ in a future release.
+
+ We also added a new client request:
+
+ MAKE_MEM_DEFINED_IF_ADDRESSABLE(a, len)
+
+ which is like MAKE_MEM_DEFINED but only affects a byte if the byte is
+ already addressable.
+
BUGS FIXED:
XXX
diff --git a/auxprogs/libmpiwrap.c b/auxprogs/libmpiwrap.c
index 5164d241e..20f4b6989 100644
--- a/auxprogs/libmpiwrap.c
+++ b/auxprogs/libmpiwrap.c
@@ -685,35 +685,35 @@ void mpiwrap_walk_type_EXTERNALLY_VISIBLE
*/
static inline
-void check_readable_untyped ( void* buffer, long nbytes )
+void check_mem_is_defined_untyped ( void* buffer, long nbytes )
{
if (nbytes > 0) {
- VALGRIND_CHECK_READABLE(buffer, nbytes);
+ VALGRIND_CHECK_MEM_IS_DEFINED(buffer, nbytes);
}
}
static inline
-void check_writable_untyped ( void* buffer, long nbytes )
+void check_mem_is_addressable_untyped ( void* buffer, long nbytes )
{
if (nbytes > 0) {
- VALGRIND_CHECK_WRITABLE(buffer, nbytes);
+ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(buffer, nbytes);
}
}
static inline
-void make_defined_untyped ( void* buffer, long nbytes )
+void make_mem_defined_if_addressable_untyped ( void* buffer, long nbytes )
{
if (nbytes > 0) {
- VALGRIND_MAKE_DEFINED(buffer, nbytes);
+ VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(buffer, nbytes);
}
}
static inline
-void make_defined_if_success_untyped ( int err,
+void make_mem_defined_if_addressable_if_success_untyped ( int err,
void* buffer, long nbytes )
{
if (err == MPI_SUCCESS && nbytes > 0) {
- VALGRIND_MAKE_DEFINED(buffer, nbytes);
+ VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(buffer, nbytes);
}
}
@@ -721,10 +721,10 @@ void make_defined_if_success_untyped ( int err,
(safe-to-write) state. */
static inline
-void make_writable_untyped ( void* buffer, long nbytes )
+void make_mem_undefined_untyped ( void* buffer, long nbytes )
{
if (nbytes > 0) {
- VALGRIND_MAKE_WRITABLE(buffer, nbytes);
+ VALGRIND_MAKE_MEM_UNDEFINED(buffer, nbytes);
}
}
@@ -739,9 +739,9 @@ void make_writable_untyped ( void* buffer, long nbytes )
initialised data, and cause V to complain if not. */
static
-void check_readable ( char* buffer, long count, MPI_Datatype datatype )
+void check_mem_is_defined ( char* buffer, long count, MPI_Datatype datatype )
{
- walk_type_array( check_readable_untyped, buffer, datatype, count );
+ walk_type_array( check_mem_is_defined_untyped, buffer, datatype, count );
}
@@ -750,9 +750,9 @@ void check_readable ( char* buffer, long count, MPI_Datatype datatype )
initialised or not. */
static
-void check_writable ( void *buffer, long count, MPI_Datatype datatype )
+void check_mem_is_addressable ( void *buffer, long count, MPI_Datatype datatype )
{
- walk_type_array( check_writable_untyped, buffer, datatype, count );
+ walk_type_array( check_mem_is_addressable_untyped, buffer, datatype, count );
}
@@ -760,18 +760,19 @@ void check_writable ( void *buffer, long count, MPI_Datatype datatype )
addressible' state. */
static
-void make_defined ( void *buffer, int count, MPI_Datatype datatype )
+void make_mem_defined_if_addressable ( void *buffer, int count, MPI_Datatype datatype )
{
- walk_type_array( make_defined_untyped, buffer, datatype, count );
+ walk_type_array( make_mem_defined_if_addressable_untyped,
+ buffer, datatype, count );
}
static
void
-make_defined_if_success ( int err, void *buffer, int count,
- MPI_Datatype datatype )
+make_mem_defined_if_addressable_if_success ( int err, void *buffer, int count,
+ MPI_Datatype datatype )
{
if (err == MPI_SUCCESS)
- make_defined(buffer, count, datatype);
+ make_mem_defined_if_addressable(buffer, count, datatype);
}
@@ -812,7 +813,7 @@ int generic_Send(void *buf, int count, MPI_Datatype datatype,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("{,B,S,R}Send");
- check_readable(buf, count, datatype);
+ check_mem_is_defined(buf, count, datatype);
CALL_FN_W_6W(err, fn, buf,count,datatype,dest,tag,comm);
after("{,B,S,R}Send", err);
return err;
@@ -848,11 +849,11 @@ int WRAPPER_FOR(PMPI_Recv)(void *buf, int count, MPI_Datatype datatype,
int err, recv_count = 0;
VALGRIND_GET_ORIG_FN(fn);
before("Recv");
- check_writable(buf, count, datatype);
- check_writable_untyped(status, sizeof(*status));
+ check_mem_is_addressable(buf, count, datatype);
+ check_mem_is_addressable_untyped(status, sizeof(*status));
CALL_FN_W_7W(err, fn, buf,count,datatype,source,tag,comm,status);
if (err == MPI_SUCCESS && count_from_Status(&recv_count,datatype,status)) {
- make_defined(buf, recv_count, datatype);
+ make_mem_defined_if_addressable(buf, recv_count, datatype);
}
after("Recv", err);
return err;
@@ -869,7 +870,7 @@ int WRAPPER_FOR(PMPI_Get_count)(MPI_Status* status,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Get_count");
- check_readable_untyped(status, sizeof(*status));
+ check_mem_is_defined_untyped(status, sizeof(*status));
CALL_FN_W_WWW(err, fn, status,ty,count);
after("Get_count", err);
return err;
@@ -1095,7 +1096,7 @@ static void maybe_complete ( Bool error_in_status,
/* The Irecv detailed in 'shadow' completed. Paint the result
buffer, and delete the entry. */
if (count_from_Status(&recv_count, shadow->datatype, status)) {
- make_defined(shadow->buf, recv_count, shadow->datatype);
+ make_mem_defined_if_addressable(shadow->buf, recv_count, shadow->datatype);
if (opt_verbosity > 1)
fprintf(stderr, "%s %5d: sReq- %p (completed)\n",
preamble, my_pid, request_before);
@@ -1117,10 +1118,10 @@ int generic_Isend(void *buf, int count, MPI_Datatype datatype,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("{,B,S,R}Isend");
- check_readable(buf, count, datatype);
- check_writable_untyped(request, sizeof(*request));
+ check_mem_is_defined(buf, count, datatype);
+ check_mem_is_addressable_untyped(request, sizeof(*request));
CALL_FN_W_7W(err, fn, buf,count,datatype,dest,tag,comm,request);
- make_defined_if_success_untyped(err, request, sizeof(*request));
+ make_mem_defined_if_addressable_if_success_untyped(err, request, sizeof(*request));
after("{,B,S,R}Isend", err);
return err;
}
@@ -1160,11 +1161,11 @@ int WRAPPER_FOR(PMPI_Irecv)( void* buf, int count, MPI_Datatype datatype,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Irecv");
- check_writable(buf, count, datatype);
- check_writable_untyped(request, sizeof(*request));
+ check_mem_is_addressable(buf, count, datatype);
+ check_mem_is_addressable_untyped(request, sizeof(*request));
CALL_FN_W_7W(err, fn, buf,count,datatype,source,tag,comm,request);
if (err == MPI_SUCCESS) {
- make_defined_untyped(request, sizeof(*request));
+ make_mem_defined_if_addressable_untyped(request, sizeof(*request));
add_shadow_Request( *request, buf,count,datatype );
}
after("Irecv", err);
@@ -1185,14 +1186,14 @@ int WRAPPER_FOR(PMPI_Wait)( MPI_Request* request,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Wait");
- check_writable_untyped(status, sizeof(MPI_Status));
- check_readable_untyped(request, sizeof(MPI_Request));
+ check_mem_is_addressable_untyped(status, sizeof(MPI_Status));
+ check_mem_is_defined_untyped(request, sizeof(MPI_Request));
request_before = *request;
CALL_FN_W_WW(err, fn, request,status);
if (err == MPI_SUCCESS) {
maybe_complete(False/*err in status?*/,
request_before, *request, status);
- make_defined_untyped(status, sizeof(MPI_Status));
+ make_mem_defined_if_addressable_untyped(status, sizeof(MPI_Status));
}
after("Wait", err);
return err;
@@ -1210,8 +1211,8 @@ int WRAPPER_FOR(PMPI_Waitall)( int count,
before("Waitall");
if (0) fprintf(stderr, "Waitall: %d\n", count);
for (i = 0; i < count; i++) {
- check_writable_untyped(&statuses[i], sizeof(MPI_Status));
- check_readable_untyped(&requests[i], sizeof(MPI_Request));
+ check_mem_is_addressable_untyped(&statuses[i], sizeof(MPI_Status));
+ check_mem_is_defined_untyped(&requests[i], sizeof(MPI_Request));
}
requests_before = clone_Request_array( count, requests );
CALL_FN_W_WWW(err, fn, count,requests,statuses);
@@ -1221,7 +1222,8 @@ int WRAPPER_FOR(PMPI_Waitall)( int count,
for (i = 0; i < count; i++) {
maybe_complete(e_i_s, requests_before[i], requests[i],
&statuses[i]);
- make_defined_untyped(&statuses[i], sizeof(MPI_Status));
+ make_mem_defined_if_addressable_untyped(&statuses[i],
+ sizeof(MPI_Status));
}
}
if (requests_before)
@@ -1240,15 +1242,15 @@ int WRAPPER_FOR(PMPI_Test)( MPI_Request* request, int* flag,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Test");
- check_writable_untyped(status, sizeof(MPI_Status));
- check_writable_untyped(flag, sizeof(int));
- check_readable_untyped(request, sizeof(MPI_Request));
+ check_mem_is_addressable_untyped(status, sizeof(MPI_Status));
+ check_mem_is_addressable_untyped(flag, sizeof(int));
+ check_mem_is_defined_untyped(request, sizeof(MPI_Request));
request_before = *request;
CALL_FN_W_WWW(err, fn, request,flag,status);
if (err == MPI_SUCCESS && *flag) {
maybe_complete(False/*err in status?*/,
request_before, *request, status);
- make_defined_untyped(status, sizeof(MPI_Status));
+ make_mem_defined_if_addressable_untyped(status, sizeof(MPI_Status));
}
after("Test", err);
return err;
@@ -1265,10 +1267,10 @@ int WRAPPER_FOR(PMPI_Testall)( int count, MPI_Request* requests,
VALGRIND_GET_ORIG_FN(fn);
before("Testall");
if (0) fprintf(stderr, "Testall: %d\n", count);
- check_writable_untyped(flag, sizeof(int));
+ check_mem_is_addressable_untyped(flag, sizeof(int));
for (i = 0; i < count; i++) {
- check_writable_untyped(&statuses[i], sizeof(MPI_Status));
- check_readable_untyped(&requests[i], sizeof(MPI_Request));
+ check_mem_is_addressable_untyped(&statuses[i], sizeof(MPI_Status));
+ check_mem_is_defined_untyped(&requests[i], sizeof(MPI_Request));
}
requests_before = clone_Request_array( count, requests );
CALL_FN_W_WWWW(err, fn, count,requests,flag,statuses);
@@ -1280,7 +1282,7 @@ int WRAPPER_FOR(PMPI_Testall)( int count, MPI_Request* requests,
for (i = 0; i < count; i++) {
maybe_complete(e_i_s, requests_before[i], requests[i],
&statuses[i]);
- make_defined_untyped(&statuses[i], sizeof(MPI_Status));
+ make_mem_defined_if_addressable_untyped(&statuses[i], sizeof(MPI_Status));
}
}
if (requests_before)
@@ -1301,13 +1303,13 @@ int WRAPPER_FOR(PMPI_Iprobe)(int source, int tag,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Iprobe");
- check_writable_untyped(flag, sizeof(*flag));
- check_writable_untyped(status, sizeof(*status));
+ check_mem_is_addressable_untyped(flag, sizeof(*flag));
+ check_mem_is_addressable_untyped(status, sizeof(*status));
CALL_FN_W_5W(err, fn, source,tag,comm,flag,status);
if (err == MPI_SUCCESS) {
- make_defined_untyped(flag, sizeof(*flag));
+ make_mem_defined_if_addressable_untyped(flag, sizeof(*flag));
if (*flag)
- make_defined_untyped(status, sizeof(*status));
+ make_mem_defined_if_addressable_untyped(status, sizeof(*status));
}
after("Iprobe", err);
return err;
@@ -1323,9 +1325,9 @@ int WRAPPER_FOR(PMPI_Probe)(int source, int tag,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Probe");
- check_writable_untyped(status, sizeof(*status));
+ check_mem_is_addressable_untyped(status, sizeof(*status));
CALL_FN_W_WWWW(err, fn, source,tag,comm,status);
- make_defined_if_success_untyped(err, status, sizeof(*status));
+ make_mem_defined_if_addressable_if_success_untyped(err, status, sizeof(*status));
after("Probe", err);
return err;
}
@@ -1341,7 +1343,7 @@ int WRAPPER_FOR(PMPI_Cancel)(MPI_Request* request)
MPI_Request tmp;
VALGRIND_GET_ORIG_FN(fn);
before("Cancel");
- check_writable_untyped(request, sizeof(*request));
+ check_mem_is_addressable_untyped(request, sizeof(*request));
tmp = *request;
CALL_FN_W_W(err, fn, request);
if (err == MPI_SUCCESS)
@@ -1374,14 +1376,14 @@ int WRAPPER_FOR(PMPI_Sendrecv)(
int err, recvcount_actual = 0;
VALGRIND_GET_ORIG_FN(fn);
before("Sendrecv");
- check_readable(sendbuf, sendcount, sendtype);
- check_writable(recvbuf, recvcount, recvtype);
+ check_mem_is_defined(sendbuf, sendcount, sendtype);
+ check_mem_is_addressable(recvbuf, recvcount, recvtype);
CALL_FN_W_12W(err, fn, sendbuf,sendcount,sendtype,dest,sendtag,
recvbuf,recvcount,recvtype,source,recvtag,
comm,status);
if (err == MPI_SUCCESS
&& count_from_Status(&recvcount_actual,recvtype,status)) {
- make_defined(recvbuf, recvcount_actual, recvtype);
+ make_mem_defined_if_addressable(recvbuf, recvcount_actual, recvtype);
}
after("Sendrecv", err);
return err;
@@ -1414,7 +1416,7 @@ int WRAPPER_FOR(PMPI_Type_commit)( MPI_Datatype* ty )
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Type_commit");
- check_readable_untyped(ty, sizeof(*ty));
+ check_mem_is_defined_untyped(ty, sizeof(*ty));
CALL_FN_W_W(err, fn, ty);
after("Type_commit", err);
return err;
@@ -1427,7 +1429,7 @@ int WRAPPER_FOR(PMPI_Type_free)( MPI_Datatype* ty )
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Type_free");
- check_readable_untyped(ty, sizeof(*ty));
+ check_mem_is_defined_untyped(ty, sizeof(*ty));
CALL_FN_W_W(err, fn, ty);
after("Type_free", err);
return err;
@@ -1460,12 +1462,12 @@ int WRAPPER_FOR(PMPI_Bcast)(void *buffer, int count,
before("Bcast");
i_am_sender = root == comm_rank(comm);
if (i_am_sender) {
- check_readable(buffer, count, datatype);
+ check_mem_is_defined(buffer, count, datatype);
} else {
- check_writable(buffer, count, datatype);
+ check_mem_is_addressable(buffer, count, datatype);
}
CALL_FN_W_5W(err, fn, buffer,count,datatype,root,comm);
- make_defined_if_success(err, buffer, count, datatype);
+ make_mem_defined_if_addressable_if_success(err, buffer, count, datatype);
after("Bcast", err);
return err;
}
@@ -1500,14 +1502,14 @@ int WRAPPER_FOR(PMPI_Gather)(
before("Gather");
me = comm_rank(comm);
sz = comm_size(comm);
- check_readable(sendbuf, sendcount, sendtype);
+ check_mem_is_defined(sendbuf, sendcount, sendtype);
if (me == root)
- check_writable(recvbuf, recvcount * sz, recvtype);
+ check_mem_is_addressable(recvbuf, recvcount * sz, recvtype);
CALL_FN_W_8W(err, fn, sendbuf,sendcount,sendtype,
recvbuf,recvcount,recvtype,
root,comm);
if (me == root)
- make_defined_if_success(err, recvbuf, recvcount * sz, recvtype);
+ make_mem_defined_if_addressable_if_success(err, recvbuf, recvcount * sz, recvtype);
after("Gather", err);
return err;
}
@@ -1534,13 +1536,13 @@ int WRAPPER_FOR(PMPI_Scatter)(
before("Scatter");
me = comm_rank(comm);
sz = comm_size(comm);
- check_writable(recvbuf, recvcount, recvtype);
+ check_mem_is_addressable(recvbuf, recvcount, recvtype);
if (me == root)
- check_readable(sendbuf, sendcount * sz, sendtype);
+ check_mem_is_defined(sendbuf, sendcount * sz, sendtype);
CALL_FN_W_8W(err, fn, sendbuf,sendcount,sendtype,
recvbuf,recvcount,recvtype,
root,comm);
- make_defined_if_success(err, recvbuf, recvcount, recvtype);
+ make_mem_defined_if_addressable_if_success(err, recvbuf, recvcount, recvtype);
after("Scatter", err);
return err;
}
@@ -1566,12 +1568,12 @@ int WRAPPER_FOR(PMPI_Alltoall)(
VALGRIND_GET_ORIG_FN(fn);
before("Alltoall");
sz = comm_size(comm);
- check_readable(sendbuf, sendcount * sz, sendtype);
- check_writable(recvbuf, recvcount * sz, recvtype);
+ check_mem_is_defined(sendbuf, sendcount * sz, sendtype);
+ check_mem_is_addressable(recvbuf, recvcount * sz, recvtype);
CALL_FN_W_7W(err, fn, sendbuf,sendcount,sendtype,
recvbuf,recvcount,recvtype,
comm);
- make_defined_if_success(err, recvbuf, recvcount * sz, recvtype);
+ make_mem_defined_if_addressable_if_success(err, recvbuf, recvcount * sz, recvtype);
after("Alltoall", err);
return err;
}
@@ -1598,12 +1600,12 @@ int WRAPPER_FOR(PMPI_Reduce)(void *sendbuf, void *recvbuf,
VALGRIND_GET_ORIG_FN(fn);
before("Reduce");
i_am_root = root == comm_rank(comm);
- check_readable(sendbuf, count, datatype);
+ check_mem_is_defined(sendbuf, count, datatype);
if (i_am_root)
- check_writable(recvbuf, count, datatype);
+ check_mem_is_addressable(recvbuf, count, datatype);
CALL_FN_W_7W(err, fn, sendbuf,recvbuf,count,datatype,op,root,comm);
if (i_am_root)
- make_defined_if_success(err, recvbuf, count, datatype);
+ make_mem_defined_if_addressable_if_success(err, recvbuf, count, datatype);
after("Reduce", err);
return err;
}
@@ -1622,10 +1624,10 @@ int WRAPPER_FOR(PMPI_Allreduce)(void *sendbuf, void *recvbuf,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Allreduce");
- check_readable(sendbuf, count, datatype);
- check_writable(recvbuf, count, datatype);
+ check_mem_is_defined(sendbuf, count, datatype);
+ check_mem_is_addressable(recvbuf, count, datatype);
CALL_FN_W_6W(err, fn, sendbuf,recvbuf,count,datatype,op,comm);
- make_defined_if_success(err, recvbuf, count, datatype);
+ make_mem_defined_if_addressable_if_success(err, recvbuf, count, datatype);
after("Allreduce", err);
return err;
}
@@ -1643,9 +1645,9 @@ int WRAPPER_FOR(PMPI_Op_create)( MPI_User_function* function,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Op_create");
- check_writable_untyped(op, sizeof(*op));
+ check_mem_is_addressable_untyped(op, sizeof(*op));
CALL_FN_W_WWW(err, fn, function,commute,op);
- make_defined_if_success_untyped(err, op, sizeof(*op));
+ make_mem_defined_if_addressable_if_success_untyped(err, op, sizeof(*op));
after("Op_create", err);
return err;
}
@@ -1708,9 +1710,9 @@ int WRAPPER_FOR(PMPI_Comm_rank)(MPI_Comm comm, int *rank)
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Comm_rank");
- check_writable_untyped(rank, sizeof(*rank));
+ check_mem_is_addressable_untyped(rank, sizeof(*rank));
CALL_FN_W_WW(err, fn, comm,rank);
- make_defined_if_success_untyped(err, rank, sizeof(*rank));
+ make_mem_defined_if_addressable_if_success_untyped(err, rank, sizeof(*rank));
after("Comm_rank", err);
return err;
}
@@ -1723,9 +1725,9 @@ int WRAPPER_FOR(PMPI_Comm_size)(MPI_Comm comm, int *size)
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Comm_size");
- check_writable_untyped(size, sizeof(*size));
+ check_mem_is_addressable_untyped(size, sizeof(*size));
CALL_FN_W_WW(err, fn, comm,size);
- make_defined_if_success_untyped(err, size, sizeof(*size));
+ make_mem_defined_if_addressable_if_success_untyped(err, size, sizeof(*size));
after("Comm_size", err);
return err;
}
@@ -1752,8 +1754,8 @@ int WRAPPER_FOR(PMPI_Error_string)( int errorcode, char* string,
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Error_string");
- check_writable_untyped(resultlen, sizeof(int));
- check_writable_untyped(string, MPI_MAX_ERROR_STRING);
+ check_mem_is_addressable_untyped(resultlen, sizeof(int));
+ check_mem_is_addressable_untyped(string, MPI_MAX_ERROR_STRING);
CALL_FN_W_WWW(err, fn, errorcode,string,resultlen);
/* Don't bother to paint the result; we assume the real function
will have filled it with defined characters :-) */
@@ -1776,8 +1778,8 @@ int WRAPPER_FOR(PMPI_Init)(int *argc, char ***argv)
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Init");
- check_readable_untyped(argc, sizeof(int));
- check_readable_untyped(*argv, *argc * sizeof(char**));
+ check_mem_is_defined_untyped(argc, sizeof(int));
+ check_mem_is_defined_untyped(*argv, *argc * sizeof(char**));
CALL_FN_W_WW(err, fn, argc,argv);
after("Init", err);
return err;
@@ -1790,9 +1792,9 @@ int WRAPPER_FOR(PMPI_Initialized)(int* flag)
int err;
VALGRIND_GET_ORIG_FN(fn);
before("Initialized");
- check_writable_untyped(flag, sizeof(int));
+ check_mem_is_addressable_untyped(flag, sizeof(int));
CALL_FN_W_W(err, fn, flag);
- make_defined_if_success_untyped(err, flag, sizeof(int));
+ make_mem_defined_if_addressable_if_success_untyped(err, flag, sizeof(int));
after("Initialized", err);
return err;
}
diff --git a/coregrind/m_mallocfree.c b/coregrind/m_mallocfree.c
index 74022e830..2cdf107eb 100644
--- a/coregrind/m_mallocfree.c
+++ b/coregrind/m_mallocfree.c
@@ -580,7 +580,7 @@ Superblock* newSuperblock ( Arena* a, SizeT cszB )
}
}
vg_assert(NULL != sb);
- //zzVALGRIND_MAKE_WRITABLE(sb, cszB);
+ //zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
sb->n_payload_bytes = cszB - sizeof(Superblock);
a->bytes_mmaped += cszB;
@@ -914,7 +914,7 @@ void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
SizeT pszB = bszB_to_pszB(a, bszB);
vg_assert(b_lno == pszB_to_listNo(pszB));
- //zzVALGRIND_MAKE_WRITABLE(b, bszB);
+ //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
// Set the size fields and indicate not-in-use.
set_bszB(b, mk_free_bszB(bszB));
@@ -943,7 +943,7 @@ void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
UInt i;
vg_assert(bszB >= min_useful_bszB(a));
- //zzVALGRIND_MAKE_WRITABLE(b, bszB);
+ //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
set_bszB(b, mk_inuse_bszB(bszB));
set_prev_b(b, NULL); // Take off freelist
set_next_b(b, NULL); // ditto
diff --git a/include/valgrind.h b/include/valgrind.h
index e92c58a1c..073d7dd41 100644
--- a/include/valgrind.h
+++ b/include/valgrind.h
@@ -2433,7 +2433,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
If you're allocating memory via superblocks, and then handing out small
chunks of each superblock, if you don't have redzones on your small
- blocks, it's worth marking the superblock with VALGRIND_MAKE_NOACCESS
+ blocks, it's worth marking the superblock with VALGRIND_MAKE_MEM_NOACCESS
when it's created, so that block overruns are detected. But if you can
put redzones on, it's probably better to not do this, so that messages
for small overruns are described in terms of the small block rather than
diff --git a/memcheck/docs/mc-manual.xml b/memcheck/docs/mc-manual.xml
index 026acf369..410ede7ef 100644
--- a/memcheck/docs/mc-manual.xml
+++ b/memcheck/docs/mc-manual.xml
@@ -995,9 +995,9 @@ arguments.</para>
<itemizedlist>
<listitem>
- <para><varname>VALGRIND_MAKE_NOACCESS</varname>,
- <varname>VALGRIND_MAKE_WRITABLE</varname> and
- <varname>VALGRIND_MAKE_READABLE</varname>.
+ <para><varname>VALGRIND_MAKE_MEM_NOACCESS</varname>,
+ <varname>VALGRIND_MAKE_MEM_UNDEFINED</varname> and
+ <varname>VALGRIND_MAKE_MEM_DEFINED</varname>.
These mark address ranges as completely inaccessible,
accessible but containing undefined data, and accessible and
containing defined data, respectively. Subsequent errors may
@@ -1007,6 +1007,12 @@ arguments.</para>
</listitem>
<listitem>
+ <para><varname>VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE</varname>.
+ This is just like <varname>VALGRIND_MAKE_MEM_DEFINED</varname> but only
+ affects those bytes that are already addressable.</para>
+ </listitem>
+
+ <listitem>
<para><varname>VALGRIND_DISCARD</varname>: At some point you may
want Valgrind to stop reporting errors in terms of the blocks
defined by the previous three macros. To do this, the above macros
@@ -1022,8 +1028,8 @@ arguments.</para>
</listitem>
<listitem>
- <para><varname>VALGRIND_CHECK_WRITABLE</varname> and
- <varname>VALGRIND_CHECK_READABLE</varname>: check immediately
+ <para><varname>VALGRIND_CHECK_MEM_IS_ADDRESSABLE</varname> and
+ <varname>VALGRIND_CHECK_MEM_IS_DEFINED</varname>: check immediately
whether or not the given address range has the relevant property,
and if not, print an error message. Also, for the convenience of
the client, returns zero if the relevant property holds; otherwise,
@@ -1033,9 +1039,9 @@ arguments.</para>
</listitem>
<listitem>
- <para><varname>VALGRIND_CHECK_DEFINED</varname>: a quick and easy
- way to find out whether Valgrind thinks a particular variable
- (lvalue, to be precise) is addressible and defined. Prints an error
+ <para><varname>VALGRIND_CHECK_VALUE_IS_DEFINED</varname>: a quick and easy
+ way to find out whether Valgrind thinks a particular value
+ (lvalue, to be precise) is addressable and defined. Prints an error
message if not. Returns no value.</para>
</listitem>
diff --git a/memcheck/mc_include.h b/memcheck/mc_include.h
index 454174988..3a0243a3a 100644
--- a/memcheck/mc_include.h
+++ b/memcheck/mc_include.h
@@ -96,10 +96,10 @@ extern VgHashTable MC_(malloc_list);
extern VgHashTable MC_(mempool_list);
/* Shadow memory functions */
-extern Bool MC_(check_noaccess)( Addr a, SizeT len, Addr* bad_addr );
-extern void MC_(make_noaccess) ( Addr a, SizeT len );
-extern void MC_(make_writable) ( Addr a, SizeT len );
-extern void MC_(make_readable) ( Addr a, SizeT len );
+extern Bool MC_(check_mem_is_noaccess)( Addr a, SizeT len, Addr* bad_addr );
+extern void MC_(make_mem_noaccess) ( Addr a, SizeT len );
+extern void MC_(make_mem_undefined)( Addr a, SizeT len );
+extern void MC_(make_mem_defined) ( Addr a, SizeT len );
extern void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len );
extern void MC_(print_malloc_stats) ( void );
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 3c862c27b..55fe51109 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -112,15 +112,15 @@
All entries in the primary (top-level) map must point to a valid
secondary (second-level) map. Since many of the 64kB chunks will
- have the same status for every bit -- ie. not mapped at all (for unused
- address space) or entirely readable (for code segments) -- there are
- three distinguished secondary maps, which indicate 'noaccess', 'writable'
- and 'readable'. For these uniform 64kB chunks, the primary map entry
- points to the relevant distinguished map. In practice, typically more
- than half of the addressable memory is represented with the 'writable' or
- 'readable' distinguished secondary map, so it gives a good saving. It
- also lets us set the V+A bits of large address regions quickly in
- set_address_range_perms().
+ have the same status for every bit -- ie. noaccess (for unused
+ address space) or entirely addressable and defined (for code segments) --
+ there are three distinguished secondary maps, which indicate 'noaccess',
+ 'undefined' and 'defined'. For these uniform 64kB chunks, the primary
+ map entry points to the relevant distinguished map. In practice,
+ typically more than half of the addressable memory is represented with
+ the 'undefined' or 'defined' distinguished secondary map, so it gives a
+ good saving. It also lets us set the V+A bits of large address regions
+ quickly in set_address_range_perms().
On 64-bit machines it's more complicated. If we followed the same basic
scheme we'd have a four-level table which would require too many memory
@@ -177,13 +177,14 @@
// compression scheme to reduce the size of shadow memory. Each byte of
// memory has 2 bits which indicates its state (ie. V+A bits):
//
-// 00: noaccess (unaddressable but treated as fully defined)
-// 01: writable (addressable and fully undefined)
-// 10: readable (addressable and fully defined)
-// 11: other (addressable and partially defined)
+// 00: noaccess (unaddressable but treated as fully defined)
+// 01: undefined (addressable and fully undefined)
+// 10: defined (addressable and fully defined)
+// 11: partdefined (addressable and partially defined)
//
-// In the "other" case, we use a secondary table to store the V bits. Each
-// entry in the secondary-V-bits table maps a byte address to its 8 V bits.
+// In the "partdefined" case, we use a secondary table to store the V bits.
+// Each entry in the secondary-V-bits table maps a byte address to its 8 V
+// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk. Hence the name
@@ -215,24 +216,24 @@
// These represent eight bits of memory.
#define VA_BITS2_NOACCESS 0x0 // 00b
-#define VA_BITS2_WRITABLE 0x1 // 01b
-#define VA_BITS2_READABLE 0x2 // 10b
-#define VA_BITS2_OTHER 0x3 // 11b
+#define VA_BITS2_UNDEFINED 0x1 // 01b
+#define VA_BITS2_DEFINED 0x2 // 10b
+#define VA_BITS2_PARTDEFINED 0x3 // 11b
// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS 0x0 // 00_00b
-#define VA_BITS4_WRITABLE 0x5 // 01_01b
-#define VA_BITS4_READABLE 0xa // 10_10b
+#define VA_BITS4_UNDEFINED 0x5 // 01_01b
+#define VA_BITS4_DEFINED 0xa // 10_10b
// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS 0x00 // 00_00_00_00b
-#define VA_BITS8_WRITABLE 0x55 // 01_01_01_01b
-#define VA_BITS8_READABLE 0xaa // 10_10_10_10b
+#define VA_BITS8_UNDEFINED 0x55 // 01_01_01_01b
+#define VA_BITS8_DEFINED 0xaa // 10_10_10_10b
// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS 0x0000 // 00_00_00_00b x 2
-#define VA_BITS16_WRITABLE 0x5555 // 01_01_01_01b x 2
-#define VA_BITS16_READABLE 0xaaaa // 10_10_10_10b x 2
+#define VA_BITS16_UNDEFINED 0x5555 // 01_01_01_01b x 2
+#define VA_BITS16_DEFINED 0xaaaa // 10_10_10_10b x 2
#define SM_CHUNKS 16384
@@ -260,8 +261,8 @@ typedef
// accessible but undefined, and one for accessible and defined.
// Distinguished secondaries may never be modified.
#define SM_DIST_NOACCESS 0
-#define SM_DIST_WRITABLE 1
-#define SM_DIST_READABLE 2
+#define SM_DIST_UNDEFINED 1
+#define SM_DIST_DEFINED 2
static SecMap sm_distinguished[3];
@@ -293,16 +294,16 @@ static SecMap* copy_for_writing ( SecMap* dist_sm )
/* --------------- Stats --------------- */
-static Int n_issued_SMs = 0;
-static Int n_deissued_SMs = 0;
-static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
-static Int n_writable_SMs = 0;
-static Int n_readable_SMs = 0;
-static Int n_non_DSM_SMs = 0;
-static Int max_noaccess_SMs = 0;
-static Int max_writable_SMs = 0;
-static Int max_readable_SMs = 0;
-static Int max_non_DSM_SMs = 0;
+static Int n_issued_SMs = 0;
+static Int n_deissued_SMs = 0;
+static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
+static Int n_undefined_SMs = 0;
+static Int n_defined_SMs = 0;
+static Int n_non_DSM_SMs = 0;
+static Int max_noaccess_SMs = 0;
+static Int max_undefined_SMs = 0;
+static Int max_defined_SMs = 0;
+static Int max_non_DSM_SMs = 0;
static ULong n_auxmap_searches = 0;
static ULong n_auxmap_cmps = 0;
@@ -314,22 +315,22 @@ static Int max_secVBit_nodes = 0;
static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
- if (oldSM == &sm_distinguished[SM_DIST_NOACCESS]) n_noaccess_SMs--;
- else if (oldSM == &sm_distinguished[SM_DIST_WRITABLE]) n_writable_SMs--;
- else if (oldSM == &sm_distinguished[SM_DIST_READABLE]) n_readable_SMs--;
- else { n_non_DSM_SMs--;
- n_deissued_SMs++; }
+ if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
+ else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
+ else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
+ else { n_non_DSM_SMs --;
+ n_deissued_SMs ++; }
- if (newSM == &sm_distinguished[SM_DIST_NOACCESS]) n_noaccess_SMs++;
- else if (newSM == &sm_distinguished[SM_DIST_WRITABLE]) n_writable_SMs++;
- else if (newSM == &sm_distinguished[SM_DIST_READABLE]) n_readable_SMs++;
- else { n_non_DSM_SMs++;
- n_issued_SMs++; }
+ if (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
+ else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
+ else if (newSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs ++;
+ else { n_non_DSM_SMs ++;
+ n_issued_SMs ++; }
- if (n_noaccess_SMs > max_noaccess_SMs) max_noaccess_SMs = n_noaccess_SMs;
- if (n_writable_SMs > max_writable_SMs) max_writable_SMs = n_writable_SMs;
- if (n_readable_SMs > max_readable_SMs) max_readable_SMs = n_readable_SMs;
- if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
+ if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
+ if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
+ if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
+ if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
}
/* --------------- Primary maps --------------- */
@@ -569,7 +570,7 @@ UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
// *** WARNING! ***
// Any time this function is called, if it is possible that vabits2
-// is equal to VA_BITS2_OTHER, then the corresponding entry in the
+// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
// sec-V-bits table must also be set!
static INLINE
void set_vabits2 ( Addr a, UChar vabits2 )
@@ -602,9 +603,9 @@ Bool set_vbits8 ( Addr a, UChar vbits8 )
// Addressable. Convert in-register format to in-memory format.
// Also remove any existing sec V bit entry for the byte if no
// longer necessary.
- if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_READABLE; }
- else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_WRITABLE; }
- else { vabits2 = VA_BITS2_OTHER;
+ if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_DEFINED; }
+ else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
+ else { vabits2 = VA_BITS2_PARTDEFINED;
set_sec_vbits8(a, vbits8); }
set_vabits2(a, vabits2);
@@ -626,13 +627,13 @@ Bool get_vbits8 ( Addr a, UChar* vbits8 )
UChar vabits2 = get_vabits2(a);
// Convert the in-memory format to in-register format.
- if ( VA_BITS2_READABLE == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
- else if ( VA_BITS2_WRITABLE == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
- else if ( VA_BITS2_NOACCESS == vabits2 ) {
+ if ( VA_BITS2_DEFINED == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
+ else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
+ else if ( VA_BITS2_NOACCESS == vabits2 ) {
*vbits8 = V_BITS8_DEFINED; // Make V bits defined!
ok = False;
} else {
- tl_assert( VA_BITS2_OTHER == vabits2 );
+ tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
*vbits8 = get_sec_vbits8(a);
}
return ok;
@@ -642,7 +643,8 @@ Bool get_vbits8 ( Addr a, UChar* vbits8 )
/* --------------- Secondary V bit table ------------ */
// This table holds the full V bit pattern for partially-defined bytes
-// (PDBs) that are represented by VA_BITS2_OTHER in the main shadow memory.
+// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
+// memory.
//
// Note: the nodes in this table can become stale. Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
@@ -744,7 +746,7 @@ static void gcSecVBitTable(void)
// get_vabits2() for the lookup is not very efficient, but I don't
// think it matters.
for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
- if (VA_BITS2_OTHER == get_vabits2(n->a + i)) {
+ if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
keep = True; // Found a non-stale byte, so keep
break;
}
@@ -963,13 +965,13 @@ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
PROF_EVENT(150, "set_address_range_perms");
/* Check the V+A bits make sense. */
- tl_assert(VA_BITS16_NOACCESS == vabits16 ||
- VA_BITS16_WRITABLE == vabits16 ||
- VA_BITS16_READABLE == vabits16);
+ tl_assert(VA_BITS16_NOACCESS == vabits16 ||
+ VA_BITS16_UNDEFINED == vabits16 ||
+ VA_BITS16_DEFINED == vabits16);
// This code should never write PDBs; ensure this. (See comment above
// set_vabits2().)
- tl_assert(VA_BITS2_OTHER != vabits2);
+ tl_assert(VA_BITS2_PARTDEFINED != vabits2);
if (lenT == 0)
return;
@@ -977,9 +979,9 @@ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
if (lenT > 100 * 1000 * 1000) {
if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
Char* s = "unknown???";
- if (vabits16 == VA_BITS16_NOACCESS) s = "noaccess";
- if (vabits16 == VA_BITS16_WRITABLE) s = "writable";
- if (vabits16 == VA_BITS16_READABLE) s = "readable";
+ if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
+ if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
+ if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
"large range %lu (%s)", lenT, s);
}
@@ -992,7 +994,7 @@ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
// the same value.
// Nb: We don't have to worry about updating the sec-V-bits table
// after these set_vabits2() calls because this code never writes
- // VA_BITS2_OTHER values.
+ // VA_BITS2_PARTDEFINED values.
SizeT i;
for (i = 0; i < lenT; i++) {
set_vabits2(a + i, vabits2);
@@ -1180,40 +1182,40 @@ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
/* --- Set permissions for arbitrary address ranges --- */
-void MC_(make_noaccess) ( Addr a, SizeT len )
+void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
- PROF_EVENT(40, "MC_(make_noaccess)");
- DEBUG("MC_(make_noaccess)(%p, %lu)\n", a, len);
+ PROF_EVENT(40, "MC_(make_mem_noaccess)");
+ DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
}
-void MC_(make_writable) ( Addr a, SizeT len )
+void MC_(make_mem_undefined) ( Addr a, SizeT len )
{
- PROF_EVENT(41, "MC_(make_writable)");
- DEBUG("MC_(make_writable)(%p, %lu)\n", a, len);
- set_address_range_perms ( a, len, VA_BITS16_WRITABLE, SM_DIST_WRITABLE );
+ PROF_EVENT(41, "MC_(make_mem_undefined)");
+ DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
+ set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
-void MC_(make_readable) ( Addr a, SizeT len )
+void MC_(make_mem_defined) ( Addr a, SizeT len )
{
- PROF_EVENT(42, "MC_(make_readable)");
- DEBUG("MC_(make_readable)(%p, %lu)\n", a, len);
- set_address_range_perms ( a, len, VA_BITS16_READABLE, SM_DIST_READABLE );
+ PROF_EVENT(42, "MC_(make_mem_defined)");
+ DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
+ set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
}
/* For each byte in [a,a+len), if the byte is addressable, make it be
   defined, but if it isn't addressable, leave it alone. In other
-   words a version of mc_make_readable that doesn't mess with
+   words a version of MC_(make_mem_defined) that doesn't mess with
   addressability. Low-performance implementation. */
-static void mc_make_defined ( Addr a, SizeT len )
+static void make_mem_defined_if_addressable ( Addr a, SizeT len )
{
SizeT i;
UChar vabits2;
- DEBUG("mc_make_defined(%p, %llu)\n", a, (ULong)len);
+ DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
for (i = 0; i < len; i++) {
vabits2 = get_vabits2( a+i );
if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vabits2)) {
- set_vabits2(a+i, VA_BITS2_READABLE);
+ set_vabits2(a+i, VA_BITS2_DEFINED);
}
}
}
@@ -1238,7 +1240,7 @@ void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
vabits2 = get_vabits2( src+j );
set_vabits2( dst+j, vabits2 );
- if (VA_BITS2_OTHER == vabits2) {
+ if (VA_BITS2_PARTDEFINED == vabits2) {
set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
}
}
@@ -1249,7 +1251,7 @@ void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
vabits2 = get_vabits2( src+i );
set_vabits2( dst+i, vabits2 );
- if (VA_BITS2_OTHER == vabits2) {
+ if (VA_BITS2_PARTDEFINED == vabits2) {
set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
}
}
@@ -1260,25 +1262,25 @@ void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
/* --- Fast case permission setters, for dealing with stacks. --- */
static INLINE
-void make_aligned_word32_writable ( Addr a )
+void make_aligned_word32_undefined ( Addr a )
{
UWord sm_off;
SecMap* sm;
- PROF_EVENT(300, "make_aligned_word32_writable");
+ PROF_EVENT(300, "make_aligned_word32_undefined");
#ifndef PERF_FAST_STACK2
- MC_(make_writable)(a, 4);
+ MC_(make_mem_undefined)(a, 4);
#else
if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(301, "make_aligned_word32_writable-slow1");
- MC_(make_writable)(a, 4);
+ PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
+ MC_(make_mem_undefined)(a, 4);
return;
}
sm = get_secmap_for_writing_low(a);
sm_off = SM_OFF(a);
- sm->vabits8[sm_off] = VA_BITS8_WRITABLE;
+ sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
#endif
}
@@ -1292,11 +1294,11 @@ void make_aligned_word32_noaccess ( Addr a )
PROF_EVENT(310, "make_aligned_word32_noaccess");
#ifndef PERF_FAST_STACK2
- MC_(make_noaccess)(a, 4);
+ MC_(make_mem_noaccess)(a, 4);
#else
if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
- MC_(make_noaccess)(a, 4);
+ MC_(make_mem_noaccess)(a, 4);
return;
}
@@ -1309,25 +1311,25 @@ void make_aligned_word32_noaccess ( Addr a )
/* Nb: by "aligned" here we mean 8-byte aligned */
static INLINE
-void make_aligned_word64_writable ( Addr a )
+void make_aligned_word64_undefined ( Addr a )
{
UWord sm_off16;
SecMap* sm;
- PROF_EVENT(320, "make_aligned_word64_writable");
+ PROF_EVENT(320, "make_aligned_word64_undefined");
#ifndef PERF_FAST_STACK2
- MC_(make_writable)(a, 8);
+ MC_(make_mem_undefined)(a, 8);
#else
if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
- PROF_EVENT(321, "make_aligned_word64_writable-slow1");
- MC_(make_writable)(a, 8);
+ PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
+ MC_(make_mem_undefined)(a, 8);
return;
}
sm = get_secmap_for_writing_low(a);
sm_off16 = SM_OFF_16(a);
- ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_WRITABLE;
+ ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
#endif
}
@@ -1341,11 +1343,11 @@ void make_aligned_word64_noaccess ( Addr a )
PROF_EVENT(330, "make_aligned_word64_noaccess");
#ifndef PERF_FAST_STACK2
- MC_(make_noaccess)(a, 8);
+ MC_(make_mem_noaccess)(a, 8);
#else
if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
- MC_(make_noaccess)(a, 8);
+ MC_(make_mem_noaccess)(a, 8);
return;
}
@@ -1364,9 +1366,9 @@ static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
{
PROF_EVENT(110, "new_mem_stack_4");
if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
}
}
@@ -1374,9 +1376,9 @@ static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
{
PROF_EVENT(120, "die_mem_stack_4");
if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
}
}
@@ -1384,12 +1386,12 @@ static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
{
PROF_EVENT(111, "new_mem_stack_8");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP+4 );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
}
}
@@ -1397,12 +1399,12 @@ static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
{
PROF_EVENT(121, "die_mem_stack_8");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
}
}
@@ -1410,13 +1412,13 @@ static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
{
PROF_EVENT(112, "new_mem_stack_12");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+4 );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
}
}
@@ -1425,13 +1427,13 @@ static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
PROF_EVENT(122, "die_mem_stack_12");
/* Note the -12 in the test */
if (VG_IS_8_ALIGNED(new_SP-12)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
}
}
@@ -1439,14 +1441,14 @@ static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
{
PROF_EVENT(113, "new_mem_stack_16");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+4 );
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP+12 );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
}
}
@@ -1454,14 +1456,14 @@ static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
{
PROF_EVENT(123, "die_mem_stack_16");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
}
}
@@ -1469,18 +1471,18 @@ static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
{
PROF_EVENT(114, "new_mem_stack_32");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+16 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+24 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+4 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+12 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+20 );
- make_aligned_word32_writable ( -VG_STACK_REDZONE_SZB + new_SP+28 );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
}
}
@@ -1488,18 +1490,18 @@ static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
{
PROF_EVENT(124, "die_mem_stack_32");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
} else if (VG_IS_4_ALIGNED(new_SP)) {
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
- make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
}
}
@@ -1507,22 +1509,22 @@ static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
{
PROF_EVENT(115, "new_mem_stack_112");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+16 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+24 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+32 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+40 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+48 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+56 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+64 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+72 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+80 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+88 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+96 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+104);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
}
}
@@ -1530,22 +1532,22 @@ static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
{
PROF_EVENT(125, "die_mem_stack_112");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
}
}
@@ -1553,24 +1555,24 @@ static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
{
PROF_EVENT(116, "new_mem_stack_128");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+16 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+24 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+32 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+40 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+48 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+56 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+64 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+72 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+80 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+88 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+96 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+104);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+112);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+120);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
}
}
@@ -1578,24 +1580,24 @@ static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
{
PROF_EVENT(126, "die_mem_stack_128");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
}
}
@@ -1603,26 +1605,26 @@ static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
{
PROF_EVENT(117, "new_mem_stack_144");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+16 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+24 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+32 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+40 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+48 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+56 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+64 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+72 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+80 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+88 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+96 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+104);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+112);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+120);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+128);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+136);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
}
}
@@ -1630,26 +1632,26 @@ static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
{
PROF_EVENT(127, "die_mem_stack_144");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
}
}
@@ -1657,28 +1659,28 @@ static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
{
PROF_EVENT(118, "new_mem_stack_160");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+8 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+16 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+24 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+32 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+40 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+48 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+56 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+64 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+72 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+80 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+88 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+96 );
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+104);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+112);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+120);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+128);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+136);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+144);
- make_aligned_word64_writable ( -VG_STACK_REDZONE_SZB + new_SP+152);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
} else {
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
}
}
@@ -1686,41 +1688,41 @@ static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
{
PROF_EVENT(128, "die_mem_stack_160");
if (VG_IS_8_ALIGNED(new_SP)) {
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
- make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
} else {
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
}
}
static void mc_new_mem_stack ( Addr a, SizeT len )
{
PROF_EVENT(115, "new_mem_stack");
- MC_(make_writable) ( -VG_STACK_REDZONE_SZB + a, len );
+ MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
}
static void mc_die_mem_stack ( Addr a, SizeT len )
{
PROF_EVENT(125, "die_mem_stack");
- MC_(make_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
@@ -1760,34 +1762,34 @@ void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
# if 0
/* Really slow version */
- MC_(make_writable)(base, len);
+ MC_(make_mem_undefined)(base, len);
# endif
# if 0
/* Slow(ish) version, which is fairly easily seen to be correct.
*/
if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
- make_aligned_word64_writable(base + 0);
- make_aligned_word64_writable(base + 8);
- make_aligned_word64_writable(base + 16);
- make_aligned_word64_writable(base + 24);
-
- make_aligned_word64_writable(base + 32);
- make_aligned_word64_writable(base + 40);
- make_aligned_word64_writable(base + 48);
- make_aligned_word64_writable(base + 56);
-
- make_aligned_word64_writable(base + 64);
- make_aligned_word64_writable(base + 72);
- make_aligned_word64_writable(base + 80);
- make_aligned_word64_writable(base + 88);
-
- make_aligned_word64_writable(base + 96);
- make_aligned_word64_writable(base + 104);
- make_aligned_word64_writable(base + 112);
- make_aligned_word64_writable(base + 120);
+ make_aligned_word64_undefined(base + 0);
+ make_aligned_word64_undefined(base + 8);
+ make_aligned_word64_undefined(base + 16);
+ make_aligned_word64_undefined(base + 24);
+
+ make_aligned_word64_undefined(base + 32);
+ make_aligned_word64_undefined(base + 40);
+ make_aligned_word64_undefined(base + 48);
+ make_aligned_word64_undefined(base + 56);
+
+ make_aligned_word64_undefined(base + 64);
+ make_aligned_word64_undefined(base + 72);
+ make_aligned_word64_undefined(base + 80);
+ make_aligned_word64_undefined(base + 88);
+
+ make_aligned_word64_undefined(base + 96);
+ make_aligned_word64_undefined(base + 104);
+ make_aligned_word64_undefined(base + 112);
+ make_aligned_word64_undefined(base + 120);
} else {
- MC_(make_writable)(base, len);
+ MC_(make_mem_undefined)(base, len);
}
# endif
@@ -1815,22 +1817,22 @@ void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
// Finally, we know that the range is entirely within one secmap.
UWord v_off = SM_OFF(a_lo);
UShort* p = (UShort*)(&sm->vabits8[v_off]);
- p[ 0] = VA_BITS16_WRITABLE;
- p[ 1] = VA_BITS16_WRITABLE;
- p[ 2] = VA_BITS16_WRITABLE;
- p[ 3] = VA_BITS16_WRITABLE;
- p[ 4] = VA_BITS16_WRITABLE;
- p[ 5] = VA_BITS16_WRITABLE;
- p[ 6] = VA_BITS16_WRITABLE;
- p[ 7] = VA_BITS16_WRITABLE;
- p[ 8] = VA_BITS16_WRITABLE;
- p[ 9] = VA_BITS16_WRITABLE;
- p[10] = VA_BITS16_WRITABLE;
- p[11] = VA_BITS16_WRITABLE;
- p[12] = VA_BITS16_WRITABLE;
- p[13] = VA_BITS16_WRITABLE;
- p[14] = VA_BITS16_WRITABLE;
- p[15] = VA_BITS16_WRITABLE;
+ p[ 0] = VA_BITS16_UNDEFINED;
+ p[ 1] = VA_BITS16_UNDEFINED;
+ p[ 2] = VA_BITS16_UNDEFINED;
+ p[ 3] = VA_BITS16_UNDEFINED;
+ p[ 4] = VA_BITS16_UNDEFINED;
+ p[ 5] = VA_BITS16_UNDEFINED;
+ p[ 6] = VA_BITS16_UNDEFINED;
+ p[ 7] = VA_BITS16_UNDEFINED;
+ p[ 8] = VA_BITS16_UNDEFINED;
+ p[ 9] = VA_BITS16_UNDEFINED;
+ p[10] = VA_BITS16_UNDEFINED;
+ p[11] = VA_BITS16_UNDEFINED;
+ p[12] = VA_BITS16_UNDEFINED;
+ p[13] = VA_BITS16_UNDEFINED;
+ p[14] = VA_BITS16_UNDEFINED;
+ p[15] = VA_BITS16_UNDEFINED;
return;
}
}
@@ -1853,49 +1855,49 @@ void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
// Finally, we know that the range is entirely within one secmap.
UWord v_off = SM_OFF(a_lo);
UShort* p = (UShort*)(&sm->vabits8[v_off]);
- p[ 0] = VA_BITS16_WRITABLE;
- p[ 1] = VA_BITS16_WRITABLE;
- p[ 2] = VA_BITS16_WRITABLE;
- p[ 3] = VA_BITS16_WRITABLE;
- p[ 4] = VA_BITS16_WRITABLE;
- p[ 5] = VA_BITS16_WRITABLE;
- p[ 6] = VA_BITS16_WRITABLE;
- p[ 7] = VA_BITS16_WRITABLE;
- p[ 8] = VA_BITS16_WRITABLE;
- p[ 9] = VA_BITS16_WRITABLE;
- p[10] = VA_BITS16_WRITABLE;
- p[11] = VA_BITS16_WRITABLE;
- p[12] = VA_BITS16_WRITABLE;
- p[13] = VA_BITS16_WRITABLE;
- p[14] = VA_BITS16_WRITABLE;
- p[15] = VA_BITS16_WRITABLE;
- p[16] = VA_BITS16_WRITABLE;
- p[17] = VA_BITS16_WRITABLE;
- p[18] = VA_BITS16_WRITABLE;
- p[19] = VA_BITS16_WRITABLE;
- p[20] = VA_BITS16_WRITABLE;
- p[21] = VA_BITS16_WRITABLE;
- p[22] = VA_BITS16_WRITABLE;
- p[23] = VA_BITS16_WRITABLE;
- p[24] = VA_BITS16_WRITABLE;
- p[25] = VA_BITS16_WRITABLE;
- p[26] = VA_BITS16_WRITABLE;
- p[27] = VA_BITS16_WRITABLE;
- p[28] = VA_BITS16_WRITABLE;
- p[29] = VA_BITS16_WRITABLE;
- p[30] = VA_BITS16_WRITABLE;
- p[31] = VA_BITS16_WRITABLE;
- p[32] = VA_BITS16_WRITABLE;
- p[33] = VA_BITS16_WRITABLE;
- p[34] = VA_BITS16_WRITABLE;
- p[35] = VA_BITS16_WRITABLE;
+ p[ 0] = VA_BITS16_UNDEFINED;
+ p[ 1] = VA_BITS16_UNDEFINED;
+ p[ 2] = VA_BITS16_UNDEFINED;
+ p[ 3] = VA_BITS16_UNDEFINED;
+ p[ 4] = VA_BITS16_UNDEFINED;
+ p[ 5] = VA_BITS16_UNDEFINED;
+ p[ 6] = VA_BITS16_UNDEFINED;
+ p[ 7] = VA_BITS16_UNDEFINED;
+ p[ 8] = VA_BITS16_UNDEFINED;
+ p[ 9] = VA_BITS16_UNDEFINED;
+ p[10] = VA_BITS16_UNDEFINED;
+ p[11] = VA_BITS16_UNDEFINED;
+ p[12] = VA_BITS16_UNDEFINED;
+ p[13] = VA_BITS16_UNDEFINED;
+ p[14] = VA_BITS16_UNDEFINED;
+ p[15] = VA_BITS16_UNDEFINED;
+ p[16] = VA_BITS16_UNDEFINED;
+ p[17] = VA_BITS16_UNDEFINED;
+ p[18] = VA_BITS16_UNDEFINED;
+ p[19] = VA_BITS16_UNDEFINED;
+ p[20] = VA_BITS16_UNDEFINED;
+ p[21] = VA_BITS16_UNDEFINED;
+ p[22] = VA_BITS16_UNDEFINED;
+ p[23] = VA_BITS16_UNDEFINED;
+ p[24] = VA_BITS16_UNDEFINED;
+ p[25] = VA_BITS16_UNDEFINED;
+ p[26] = VA_BITS16_UNDEFINED;
+ p[27] = VA_BITS16_UNDEFINED;
+ p[28] = VA_BITS16_UNDEFINED;
+ p[29] = VA_BITS16_UNDEFINED;
+ p[30] = VA_BITS16_UNDEFINED;
+ p[31] = VA_BITS16_UNDEFINED;
+ p[32] = VA_BITS16_UNDEFINED;
+ p[33] = VA_BITS16_UNDEFINED;
+ p[34] = VA_BITS16_UNDEFINED;
+ p[35] = VA_BITS16_UNDEFINED;
return;
}
}
}
/* else fall into slow case */
- MC_(make_writable)(base, len);
+ MC_(make_mem_undefined)(base, len);
}
@@ -1920,14 +1922,14 @@ typedef
returns False, and if bad_addr is non-NULL, sets *bad_addr to
indicate the lowest failing address. Functions below are
similar. */
-Bool MC_(check_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
+Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
{
SizeT i;
UWord vabits2;
- PROF_EVENT(60, "mc_check_noaccess");
+ PROF_EVENT(60, "check_mem_is_noaccess");
for (i = 0; i < len; i++) {
- PROF_EVENT(61, "mc_check_noaccess(loop)");
+ PROF_EVENT(61, "check_mem_is_noaccess(loop)");
vabits2 = get_vabits2(a);
if (VA_BITS2_NOACCESS != vabits2) {
if (bad_addr != NULL) *bad_addr = a;
@@ -1938,15 +1940,14 @@ Bool MC_(check_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
return True;
}
-// Note that this succeeds also if the memory is readable.
-static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
+static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
{
SizeT i;
UWord vabits2;
- PROF_EVENT(62, "mc_check_writable");
+ PROF_EVENT(62, "is_mem_addressable");
for (i = 0; i < len; i++) {
- PROF_EVENT(63, "mc_check_writable(loop)");
+ PROF_EVENT(63, "is_mem_addressable(loop)");
vabits2 = get_vabits2(a);
if (VA_BITS2_NOACCESS == vabits2) {
if (bad_addr != NULL) *bad_addr = a;
@@ -1957,17 +1958,17 @@ static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
return True;
}
-static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
+static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
{
SizeT i;
UWord vabits2;
- PROF_EVENT(64, "mc_check_readable");
- DEBUG("mc_check_readable\n");
+ PROF_EVENT(64, "is_mem_defined");
+ DEBUG("is_mem_defined\n");
for (i = 0; i < len; i++) {
- PROF_EVENT(65, "mc_check_readable(loop)");
+ PROF_EVENT(65, "is_mem_defined(loop)");
vabits2 = get_vabits2(a);
- if (VA_BITS2_READABLE != vabits2) {
+ if (VA_BITS2_DEFINED != vabits2) {
// Error! Nb: Report addressability errors in preference to
// definedness errors. And don't report definedeness errors unless
// --undef-value-errors=yes.
@@ -1985,16 +1986,16 @@ static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
examine the actual bytes, to find the end, until we're sure it is
safe to do so. */
-static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
+static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
{
UWord vabits2;
- PROF_EVENT(66, "mc_check_readable_asciiz");
- DEBUG("mc_check_readable_asciiz\n");
+ PROF_EVENT(66, "mc_is_defined_asciiz");
+ DEBUG("mc_is_defined_asciiz\n");
while (True) {
- PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
+ PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
vabits2 = get_vabits2(a);
- if (VA_BITS2_READABLE != vabits2) {
+ if (VA_BITS2_DEFINED != vabits2) {
// Error! Nb: Report addressability errors in preference to
// definedness errors. And don't report definedeness errors unless
// --undef-value-errors=yes.
@@ -2016,15 +2017,12 @@ static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
/*------------------------------------------------------------*/
static
-void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
- Addr base, SizeT size )
+void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
+ Addr base, SizeT size )
{
- Bool ok;
Addr bad_addr;
+ Bool ok = is_mem_addressable ( base, size, &bad_addr );
- /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
- base,base+size-1); */
- ok = mc_check_writable ( base, size, &bad_addr );
if (!ok) {
switch (part) {
case Vg_CoreSysCall:
@@ -2038,23 +2036,17 @@ void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
break;
default:
- VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
+ VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
}
}
}
static
-void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
+void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
Addr base, SizeT size )
{
Addr bad_addr;
- MC_ReadResult res;
-
- res = mc_check_readable ( base, size, &bad_addr );
-
- if (0)
- VG_(printf)("mc_check_is_readable(0x%x, %d, %s) -> %s\n",
- (UInt)base, (Int)size, s, res==MC_Ok ? "yes" : "no" );
+ MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
if (MC_Ok != res) {
Bool isUnaddr = ( MC_AddrErr == res ? True : False );
@@ -2062,7 +2054,7 @@ void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
switch (part) {
case Vg_CoreSysCall:
mc_record_param_error ( tid, bad_addr, /*isReg*/False,
- isUnaddr, s );
+ isUnaddr, s );
break;
case Vg_CoreClientReq: // Kludge: make this a CoreMemErr
@@ -2077,21 +2069,20 @@ void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
break;
default:
- VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
+ VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
}
}
}
static
-void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
+void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
Char* s, Addr str )
{
MC_ReadResult res;
Addr bad_addr = 0; // shut GCC up
- /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
tl_assert(part == Vg_CoreSysCall);
- res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
+ res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
if (MC_Ok != res) {
Bool isUnaddr = ( MC_AddrErr == res ? True : False );
mc_record_param_error ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
@@ -2101,22 +2092,22 @@ void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
- /* Ignore the permissions, just make it readable. Seems to work... */
+ /* Ignore the permissions, just make it defined. Seems to work... */
DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
- a,(ULong)len,rr,ww,xx);
- MC_(make_readable)(a, len);
+ a, (ULong)len, rr, ww, xx);
+ MC_(make_mem_defined)(a, len);
}
static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
- MC_(make_readable)(a, len);
+ MC_(make_mem_defined)(a, len);
}
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
- MC_(make_readable)(a, len);
+ MC_(make_mem_defined)(a, len);
}
@@ -3070,12 +3061,12 @@ ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
// Handle common case quickly: a is suitably aligned, is mapped, and
// addressible.
// Convert V bits from compact memory form to expanded register form.
- if (EXPECTED_TAKEN(vabits16 == VA_BITS16_READABLE)) {
+ if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
return V_BITS64_DEFINED;
- } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_WRITABLE)) {
+ } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
return V_BITS64_UNDEFINED;
} else {
- /* Slow case: the 8 bytes are not all-readable or all-writable. */
+ /* Slow case: the 8 bytes are not all-defined or all-undefined. */
PROF_EVENT(202, "mc_LOADV64-slow2");
return mc_LOADVn_slow( a, 64, isBigEndian );
}
@@ -3116,16 +3107,16 @@ void mc_STOREV64 ( Addr a, ULong vbytes, Bool isBigEndian )
vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
- (VA_BITS16_READABLE == vabits16 ||
- VA_BITS16_WRITABLE == vabits16) ))
+ (VA_BITS16_DEFINED == vabits16 ||
+ VA_BITS16_UNDEFINED == vabits16) ))
{
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
if (V_BITS64_DEFINED == vbytes) {
- ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_READABLE;
+ ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
} else if (V_BITS64_UNDEFINED == vbytes) {
- ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_WRITABLE;
+ ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(212, "mc_STOREV64-slow2");
@@ -3176,12 +3167,12 @@ UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
// Convert V bits from compact memory form to expanded register form.
// For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
// Almost certainly not necessary, but be paranoid.
- if (EXPECTED_TAKEN(vabits8 == VA_BITS8_READABLE)) {
+ if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
- } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_WRITABLE)) {
+ } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
} else {
- /* Slow case: the 4 bytes are not all-readable or all-writable. */
+ /* Slow case: the 4 bytes are not all-defined or all-undefined. */
PROF_EVENT(222, "mc_LOADV32-slow2");
return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
}
@@ -3225,22 +3216,22 @@ void mc_STOREV32 ( Addr a, UWord vbytes, Bool isBigEndian )
// all, if we can tell that what we want to write is the same as what is
// already there.
if (V_BITS32_DEFINED == vbytes) {
- if (vabits8 == (UInt)VA_BITS8_READABLE) {
+ if (vabits8 == (UInt)VA_BITS8_DEFINED) {
return;
- } else if (!is_distinguished_sm(sm) && VA_BITS8_WRITABLE == vabits8) {
- sm->vabits8[sm_off] = (UInt)VA_BITS8_READABLE;
+ } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
+ sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
} else {
- // not readable/writable, or distinguished and changing state
+ // not defined/undefined, or distinguished and changing state
PROF_EVENT(232, "mc_STOREV32-slow2");
mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
} else if (V_BITS32_UNDEFINED == vbytes) {
- if (vabits8 == (UInt)VA_BITS8_WRITABLE) {
+ if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
return;
- } else if (!is_distinguished_sm(sm) && VA_BITS8_READABLE == vabits8) {
- sm->vabits8[sm_off] = (UInt)VA_BITS8_WRITABLE;
+ } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
+ sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
} else {
- // not readable/writable, or distinguished and changing state
+ // not defined/undefined, or distinguished and changing state
PROF_EVENT(233, "mc_STOREV32-slow3");
mc_STOREVn_slow( a, 32, (ULong)vbytes, isBigEndian );
}
@@ -3252,16 +3243,16 @@ void mc_STOREV32 ( Addr a, UWord vbytes, Bool isBigEndian )
//---------------------------------------------------------------------------
#else
if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
- (VA_BITS8_READABLE == vabits8 ||
- VA_BITS8_WRITABLE == vabits8) ))
+ (VA_BITS8_DEFINED == vabits8 ||
+ VA_BITS8_UNDEFINED == vabits8) ))
{
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
if (V_BITS32_DEFINED == vbytes) {
- sm->vabits8[sm_off] = VA_BITS8_READABLE;
+ sm->vabits8[sm_off] = VA_BITS8_DEFINED;
} else if (V_BITS32_UNDEFINED == vbytes) {
- sm->vabits8[sm_off] = VA_BITS8_WRITABLE;
+ sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
} else {
/* Slow but general case -- writing partially defined bytes. */
PROF_EVENT(232, "mc_STOREV32-slow2");
@@ -3312,16 +3303,16 @@ UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
// addressible.
// Convert V bits from compact memory form to expanded register form
// XXX: set the high 16/48 bits of retval to 1 for 64-bit paranoia?
- if (vabits8 == VA_BITS8_READABLE) { return V_BITS16_DEFINED; }
- else if (vabits8 == VA_BITS8_WRITABLE) { return V_BITS16_UNDEFINED; }
+ if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
+ else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
else {
- // The 4 (yes, 4) bytes are not all-readable or all-writable, check
+ // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
// the two sub-bytes.
UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
- if (vabits4 == VA_BITS4_READABLE) { return V_BITS16_DEFINED; }
- else if (vabits4 == VA_BITS4_WRITABLE) { return V_BITS16_UNDEFINED; }
+ if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
+ else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
else {
- /* Slow case: the two bytes are not all-readable or all-writable. */
+ /* Slow case: the two bytes are not all-defined or all-undefined. */
PROF_EVENT(242, "mc_LOADV16-slow2");
return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
}
@@ -3360,17 +3351,17 @@ void mc_STOREV16 ( Addr a, UWord vbytes, Bool isBigEndian )
sm_off = SM_OFF(a);
vabits8 = sm->vabits8[sm_off];
if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
- (VA_BITS8_READABLE == vabits8 ||
- VA_BITS8_WRITABLE == vabits8) ))
+ (VA_BITS8_DEFINED == vabits8 ||
+ VA_BITS8_UNDEFINED == vabits8) ))
{
/* Handle common case quickly: a is suitably aligned, */
/* is mapped, and is addressible. */
// Convert full V-bits in register to compact 2-bit form.
if (V_BITS16_DEFINED == vbytes) {
- insert_vabits4_into_vabits8( a, VA_BITS4_READABLE,
+ insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
&(sm->vabits8[sm_off]) );
} else if (V_BITS16_UNDEFINED == vbytes) {
- insert_vabits4_into_vabits8( a, VA_BITS4_WRITABLE,
+ insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
&(sm->vabits8[sm_off]) );
} else {
/* Slow but general case -- writing partially defined bytes. */
@@ -3421,16 +3412,16 @@ UWord MC_(helperc_LOADV8) ( Addr a )
// Handle common case quickly: a is mapped, and the entire
// word32 it lives in is addressible.
// XXX: set the high 24/56 bits of retval to 1 for 64-bit paranoia?
- if (vabits8 == VA_BITS8_READABLE) { return V_BITS8_DEFINED; }
- else if (vabits8 == VA_BITS8_WRITABLE) { return V_BITS8_UNDEFINED; }
+ if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
+ else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
else {
- // The 4 (yes, 4) bytes are not all-readable or all-writable, check
+ // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
// the single byte.
UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
- if (vabits2 == VA_BITS2_READABLE) { return V_BITS8_DEFINED; }
- else if (vabits2 == VA_BITS2_WRITABLE) { return V_BITS8_UNDEFINED; }
+ if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
+ else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
else {
- /* Slow case: the byte is not all-readable or all-writable. */
+ /* Slow case: the byte is not all-defined or all-undefined. */
PROF_EVENT(262, "mc_LOADV8-slow2");
return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
}
@@ -3461,7 +3452,7 @@ void MC_(helperc_STOREV8) ( Addr a, UWord vbyte )
vabits8 = sm->vabits8[sm_off];
if (EXPECTED_TAKEN
( !is_distinguished_sm(sm) &&
- ( (VA_BITS8_READABLE == vabits8 || VA_BITS8_WRITABLE == vabits8)
+ ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
|| (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
)
)
@@ -3471,10 +3462,10 @@ void MC_(helperc_STOREV8) ( Addr a, UWord vbyte )
lives in is addressible. */
// Convert full V-bits in register to compact 2-bit form.
if (V_BITS8_DEFINED == vbyte) {
- insert_vabits2_into_vabits8( a, VA_BITS2_READABLE,
+ insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
&(sm->vabits8[sm_off]) );
} else if (V_BITS8_UNDEFINED == vbyte) {
- insert_vabits2_into_vabits8( a, VA_BITS2_WRITABLE,
+ insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
&(sm->vabits8[sm_off]) );
} else {
/* Slow but general case -- writing partially defined bytes. */
@@ -3560,8 +3551,8 @@ static Int mc_get_or_set_vbits_for_client (
// It's actually a tool ClientReq, but Vg_CoreClientReq is the closest
// thing we have.
- mc_check_is_readable(Vg_CoreClientReq, tid, "SET_VBITS(vbits)",
- vbits, szB);
+ check_mem_is_defined(Vg_CoreClientReq, tid, "SET_VBITS(vbits)",
+ vbits, szB);
/* setting */
for (i = 0; i < szB; i++) {
@@ -3579,7 +3570,7 @@ static Int mc_get_or_set_vbits_for_client (
((UChar*)vbits)[i] = vbits8;
}
// The bytes in vbits[] have now been set, so mark them as such.
- MC_(make_readable)(vbits, szB);
+ MC_(make_mem_defined)(vbits, szB);
}
return 1;
@@ -3618,7 +3609,7 @@ Bool mc_is_valid_aligned_word ( Addr a )
} else {
tl_assert(VG_IS_8_ALIGNED(a));
}
- if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
+ if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok) {
return True;
} else {
return False;
@@ -3658,11 +3649,11 @@ static void init_shadow_memory ( void )
sm = &sm_distinguished[SM_DIST_NOACCESS];
for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
- sm = &sm_distinguished[SM_DIST_WRITABLE];
- for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_WRITABLE;
+ sm = &sm_distinguished[SM_DIST_UNDEFINED];
+ for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
- sm = &sm_distinguished[SM_DIST_READABLE];
- for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_READABLE;
+ sm = &sm_distinguished[SM_DIST_DEFINED];
+ for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
/* Set up the primary map. */
/* These entries gradually get overwritten as the used address
@@ -3701,22 +3692,22 @@ static Bool mc_expensive_sanity_check ( void )
/* Check that the 3 distinguished SMs are still as they should be. */
- /* Check noaccess. */
+ /* Check noaccess DSM. */
sm = &sm_distinguished[SM_DIST_NOACCESS];
for (i = 0; i < SM_CHUNKS; i++)
if (sm->vabits8[i] != VA_BITS8_NOACCESS)
bad = True;
- /* Check writable. */
- sm = &sm_distinguished[SM_DIST_WRITABLE];
+ /* Check undefined DSM. */
+ sm = &sm_distinguished[SM_DIST_UNDEFINED];
for (i = 0; i < SM_CHUNKS; i++)
- if (sm->vabits8[i] != VA_BITS8_WRITABLE)
+ if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
bad = True;
- /* Check readable. */
- sm = &sm_distinguished[SM_DIST_READABLE];
+ /* Check defined DSM. */
+ sm = &sm_distinguished[SM_DIST_DEFINED];
for (i = 0; i < SM_CHUNKS; i++)
- if (sm->vabits8[i] != VA_BITS8_READABLE)
+ if (sm->vabits8[i] != VA_BITS8_DEFINED)
bad = True;
if (bad) {
@@ -3998,17 +3989,17 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
return False;
switch (arg[0]) {
- case VG_USERREQ__CHECK_WRITABLE: /* check writable */
- ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
+ case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
+ ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
if (!ok)
mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
/*isUnaddr*/True );
*ret = ok ? (UWord)NULL : bad_addr;
break;
- case VG_USERREQ__CHECK_READABLE: { /* check readable */
+ case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
MC_ReadResult res;
- res = mc_check_readable ( arg[1], arg[2], &bad_addr );
+ res = is_mem_defined ( arg[1], arg[2], &bad_addr );
if (MC_AddrErr == res)
mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
/*isUnaddr*/True );
@@ -4024,23 +4015,23 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
*ret = 0; /* return value is meaningless */
break;
- case VG_USERREQ__MAKE_NOACCESS: /* make no access */
- MC_(make_noaccess) ( arg[1], arg[2] );
+ case VG_USERREQ__MAKE_MEM_NOACCESS:
+ MC_(make_mem_noaccess) ( arg[1], arg[2] );
*ret = -1;
break;
- case VG_USERREQ__MAKE_WRITABLE: /* make writable */
- MC_(make_writable) ( arg[1], arg[2] );
+ case VG_USERREQ__MAKE_MEM_UNDEFINED:
+ MC_(make_mem_undefined) ( arg[1], arg[2] );
*ret = -1;
break;
- case VG_USERREQ__MAKE_READABLE: /* make readable */
- MC_(make_readable) ( arg[1], arg[2] );
+ case VG_USERREQ__MAKE_MEM_DEFINED:
+ MC_(make_mem_defined) ( arg[1], arg[2] );
*ret = -1;
break;
- case VG_USERREQ__MAKE_DEFINED: /* make defined */
- mc_make_defined ( arg[1], arg[2] );
+ case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
+ make_mem_defined_if_addressable ( arg[1], arg[2] );
*ret = -1;
break;
@@ -4098,7 +4089,7 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
*argp[4] = MC_(bytes_suppressed);
// there is no argp[5]
//*argp[5] = MC_(bytes_indirect);
- // XXX need to make *argp[1-4] readable
+ // XXX need to make *argp[1-4] defined
*ret = 0;
return True;
}
@@ -4273,12 +4264,12 @@ static void mc_fini ( Int exitcode )
" memcheck: auxmaps: %lld searches, %lld comparisons",
n_auxmap_searches, n_auxmap_cmps );
- print_SM_info("n_issued ", n_issued_SMs);
- print_SM_info("n_deissued ", n_deissued_SMs);
- print_SM_info("max_noaccess", max_noaccess_SMs);
- print_SM_info("max_writable", max_writable_SMs);
- print_SM_info("max_readable", max_readable_SMs);
- print_SM_info("max_non_DSM ", max_non_DSM_SMs);
+ print_SM_info("n_issued ", n_issued_SMs);
+ print_SM_info("n_deissued ", n_deissued_SMs);
+ print_SM_info("max_noaccess ", max_noaccess_SMs);
+ print_SM_info("max_undefined", max_undefined_SMs);
+ print_SM_info("max_defined ", max_defined_SMs);
+ print_SM_info("max_non_DSM ", max_non_DSM_SMs);
// Three DSMs, plus the non-DSM ones
max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
@@ -4352,8 +4343,8 @@ static void mc_pre_clo_init(void)
MC_MALLOC_REDZONE_SZB );
VG_(track_new_mem_startup) ( mc_new_mem_startup );
- VG_(track_new_mem_stack_signal)( MC_(make_writable) );
- VG_(track_new_mem_brk) ( MC_(make_writable) );
+ VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
+ VG_(track_new_mem_brk) ( MC_(make_mem_undefined) );
VG_(track_new_mem_mmap) ( mc_new_mem_mmap );
VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );
@@ -4369,9 +4360,9 @@ static void mc_pre_clo_init(void)
// distinct from V bits, then we could handle all this properly.
VG_(track_change_mem_mprotect) ( NULL );
- VG_(track_die_mem_stack_signal)( MC_(make_noaccess) );
- VG_(track_die_mem_brk) ( MC_(make_noaccess) );
- VG_(track_die_mem_munmap) ( MC_(make_noaccess) );
+ VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
+ VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
+ VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
#ifdef PERF_FAST_STACK
VG_(track_new_mem_stack_4) ( mc_new_mem_stack_4 );
@@ -4399,11 +4390,11 @@ static void mc_pre_clo_init(void)
#endif
VG_(track_die_mem_stack) ( mc_die_mem_stack );
- VG_(track_ban_mem_stack) ( MC_(make_noaccess) );
+ VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );
- VG_(track_pre_mem_read) ( mc_check_is_readable );
- VG_(track_pre_mem_read_asciiz) ( mc_check_is_readable_asciiz );
- VG_(track_pre_mem_write) ( mc_check_is_writable );
+ VG_(track_pre_mem_read) ( check_mem_is_defined );
+ VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
+ VG_(track_pre_mem_write) ( check_mem_is_addressable );
VG_(track_post_mem_write) ( mc_post_mem_write );
if (MC_(clo_undef_value_errors))
@@ -4432,3 +4423,5 @@ VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
/*--- end ---*/
/*--------------------------------------------------------------------*/
+
+
diff --git a/memcheck/mc_malloc_wrappers.c b/memcheck/mc_malloc_wrappers.c
index a69627a3c..337367faf 100644
--- a/memcheck/mc_malloc_wrappers.c
+++ b/memcheck/mc_malloc_wrappers.c
@@ -133,7 +133,7 @@ MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT size,
the mc->data field isn't visible to the leak checker. If memory
management is working correctly, any pointer returned by VG_(malloc)
should be noaccess as far as the client is concerned. */
- if (!MC_(check_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
+ if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
}
return mc;
@@ -192,9 +192,9 @@ void* MC_(new_block) ( ThreadId tid,
VG_(HT_add_node)( table, create_MC_Chunk(tid, p, size, kind) );
if (is_zeroed)
- MC_(make_readable)( p, size );
+ MC_(make_mem_defined)( p, size );
else
- MC_(make_writable)( p, size );
+ MC_(make_mem_undefined)( p, size );
return (void*)p;
}
@@ -259,7 +259,7 @@ void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
/* Note: make redzones noaccess again -- just in case user made them
accessible with a client request... */
- MC_(make_noaccess)( mc->data-rzB, mc->size + 2*rzB );
+ MC_(make_mem_noaccess)( mc->data-rzB, mc->size + 2*rzB );
/* Put it out of harm's way for a while, if not from a client request */
if (MC_AllocCustom != mc->allockind) {
@@ -345,7 +345,7 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_size )
} else if (old_size > new_size) {
/* new size is smaller */
- MC_(make_noaccess)( mc->data+new_size, mc->size-new_size );
+ MC_(make_mem_noaccess)( mc->data+new_size, mc->size-new_size );
mc->size = new_size;
mc->where = VG_(record_ExeContext)(tid);
p_new = p_old;
@@ -357,10 +357,10 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_size )
if (a_new) {
/* First half kept and copied, second half new, red zones as normal */
- MC_(make_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, MC_MALLOC_REDZONE_SZB );
MC_(copy_address_range_state)( (Addr)p_old, a_new, mc->size );
- MC_(make_writable)( a_new+mc->size, new_size-mc->size );
- MC_(make_noaccess)( a_new+new_size, MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_undefined)( a_new+mc->size, new_size-mc->size );
+ MC_(make_mem_noaccess) ( a_new+new_size, MC_MALLOC_REDZONE_SZB );
/* Copy from old to new */
VG_(memcpy)((void*)a_new, p_old, mc->size);
@@ -403,7 +403,7 @@ void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
management is working correctly, anything pointer returned by
VG_(malloc) should be noaccess as far as the client is
concerned. */
- if (!MC_(check_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
+ if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
}
@@ -428,7 +428,7 @@ void MC_(destroy_mempool)(Addr pool)
while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
/* Note: make redzones noaccess again -- just in case user made them
accessible with a client request... */
- MC_(make_noaccess)(mc->data-mp->rzB, mc->size + 2*mp->rzB );
+ MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->size + 2*mp->rzB );
}
// Destroy the chunk table
VG_(HT_destruct)(mp->chunks);
diff --git a/memcheck/memcheck.h b/memcheck/memcheck.h
index ab3c2704d..bbf12a653 100644
--- a/memcheck/memcheck.h
+++ b/memcheck/memcheck.h
@@ -76,12 +76,12 @@
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
- VG_USERREQ__MAKE_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
- VG_USERREQ__MAKE_WRITABLE,
- VG_USERREQ__MAKE_READABLE,
+ VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
+ VG_USERREQ__MAKE_MEM_UNDEFINED,
+ VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
- VG_USERREQ__CHECK_WRITABLE,
- VG_USERREQ__CHECK_READABLE,
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+ VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
@@ -90,7 +90,7 @@ typedef
VG_USERREQ__CREATE_BLOCK,
- VG_USERREQ__MAKE_DEFINED,
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
@@ -101,44 +101,54 @@ typedef
/* Client-code macros to manipulate the state of memory. */
-/* Mark memory at _qzz_addr as unaddressible and undefined for
- _qzz_len bytes. */
-#define VALGRIND_MAKE_NOACCESS(_qzz_addr,_qzz_len) \
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
- VG_USERREQ__MAKE_NOACCESS, \
+ VG_USERREQ__MAKE_MEM_NOACCESS, \
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
-/* Similarly, mark memory at _qzz_addr as addressible but undefined
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
-#define VALGRIND_MAKE_WRITABLE(_qzz_addr,_qzz_len) \
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
- VG_USERREQ__MAKE_WRITABLE, \
+ VG_USERREQ__MAKE_MEM_UNDEFINED, \
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
-/* Similarly, mark memory at _qzz_addr as addressible and defined
+/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
-#define VALGRIND_MAKE_READABLE(_qzz_addr,_qzz_len) \
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
- VG_USERREQ__MAKE_READABLE, \
+ VG_USERREQ__MAKE_MEM_DEFINED, \
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
-/* Similar to mark memory at VALGRIND_MAKE_READABLE except that
- addressibility is not altered: bytes which are addressible are
- marked as defined, but those which are not addressible are
- left unchanged. */
-#define VALGRIND_MAKE_DEFINED(_qzz_addr,_qzz_len) \
+/* This is the old name for VALGRIND_MAKE_MEM_NOACCESS. Deprecated. */
+#define VALGRIND_MAKE_NOACCESS(_qzz_addr,_qzz_len) \
+ VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)
+
+/* This is the old name for VALGRIND_MAKE_MEM_UNDEFINED. Deprecated. */
+#define VALGRIND_MAKE_WRITABLE(_qzz_addr,_qzz_len) \
+ VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)
+
+/* This is the old name for VALGRIND_MAKE_MEM_DEFINED. Deprecated. */
+#define VALGRIND_MAKE_READABLE(_qzz_addr,_qzz_len) \
+ VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+ not altered: bytes which are addressable are marked as defined,
+ but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
- VG_USERREQ__MAKE_DEFINED, \
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
@@ -169,40 +179,53 @@ typedef
/* Client-code macros to check the state of memory. */
-/* Check that memory at _qzz_addr is addressible for _qzz_len bytes.
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressibility is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
-#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__CHECK_WRITABLE, \
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,\
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
-/* Check that memory at _qzz_addr is addressible and defined for
+/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressibility and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
-#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
- VG_USERREQ__CHECK_READABLE, \
+ VG_USERREQ__CHECK_MEM_IS_DEFINED, \
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
-/* Use this macro to force the definedness and addressibility of a
- value to be checked. If suitable addressibility and definedness
+/* Use this macro to force the definedness and addressibility of an
+ lvalue to be checked. If suitable addressibility and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
-#define VALGRIND_CHECK_DEFINED(__lvalue) \
- VALGRIND_CHECK_READABLE( \
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
+ VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned int)(sizeof (__lvalue)))
+/* This is the old name for VALGRIND_CHECK_MEM_IS_ADDRESSABLE. Deprecated. */
+#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \
+ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)
+
+/* This is the old name for VALGRIND_CHECK_MEM_IS_DEFINED. Deprecated. */
+#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \
+ VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)
+
+/* This is the old name for VALGRIND_CHECK_VALUE_IS_DEFINED. Deprecated. */
+#define VALGRIND_CHECK_DEFINED(__lvalue) \
+ VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)
+
+
/* Do a memory leak check mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
{unsigned int _qzz_res; \
@@ -236,7 +259,7 @@ typedef
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
- 3 if any parts of zzsrc/zzvbits are not addressible.
+ 3 if any parts of zzsrc/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
@@ -255,7 +278,7 @@ typedef
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
- 3 if any parts of zza/zzvbits are not addressible.
+ 3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
diff --git a/memcheck/tests/addressable.c b/memcheck/tests/addressable.c
index 085f97b10..87227d71a 100644
--- a/memcheck/tests/addressable.c
+++ b/memcheck/tests/addressable.c
@@ -34,18 +34,18 @@ static void test1()
{
char *m = mm(0, pgsz * 5, PROT_READ);
- VALGRIND_CHECK_READABLE(m, pgsz*5); /* all defined */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m, pgsz*5); /* all defined */
}
/* Case 2 - unmapped memory is unaddressable+undefined */
static void test2()
{
char *m = mm(0, pgsz * 5, PROT_READ|PROT_WRITE);
- VALGRIND_CHECK_READABLE(m, pgsz*5); /* all OK */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m, pgsz*5); /* all OK */
munmap(&m[pgsz*2], pgsz);
- VALGRIND_CHECK_READABLE(&m[pgsz*2], pgsz); /* undefined */
+ VALGRIND_CHECK_MEM_IS_DEFINED(&m[pgsz*2], pgsz); /* undefined */
/* XXX need a memcheck request to test addressability */
m[pgsz*2] = 'x'; /* unmapped fault */
@@ -56,9 +56,9 @@ static void test3()
{
char *m = mm(0, pgsz * 5, PROT_READ|PROT_WRITE);
- VALGRIND_MAKE_WRITABLE(&m[pgsz], pgsz);
+ VALGRIND_MAKE_MEM_UNDEFINED(&m[pgsz], pgsz);
mm(&m[pgsz], pgsz, PROT_READ);
- VALGRIND_CHECK_READABLE(&m[pgsz], pgsz); /* OK */
+ VALGRIND_CHECK_MEM_IS_DEFINED(&m[pgsz], pgsz); /* OK */
}
/* Case 4 - mprotect doesn't affect addressability */
@@ -67,7 +67,7 @@ static void test4()
char *m = mm(0, pgsz * 5, PROT_READ|PROT_WRITE);
mprotect(m, pgsz, PROT_WRITE);
- VALGRIND_CHECK_READABLE(m, pgsz); /* OK */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m, pgsz); /* OK */
m[44] = 'y'; /* OK */
mprotect(m, pgsz*5, PROT_NONE);
@@ -79,16 +79,16 @@ static void test5()
{
char *m = mm(0, pgsz * 5, PROT_READ|PROT_WRITE);
- VALGRIND_MAKE_WRITABLE (m, pgsz*5);
+ VALGRIND_MAKE_MEM_UNDEFINED(m, pgsz*5);
memset(m, 'x', 10);
- VALGRIND_CHECK_READABLE(m, 10); /* OK */
- VALGRIND_CHECK_READABLE(m+10, 10); /* BAD */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m, 10); /* OK */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m+10, 10); /* BAD */
mprotect(m, pgsz*5, PROT_NONE);
mprotect(m, pgsz*5, PROT_READ);
- VALGRIND_CHECK_READABLE(m, 10); /* still OK */
- VALGRIND_CHECK_READABLE(m+20, 10); /* BAD */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m, 10); /* still OK */
+ VALGRIND_CHECK_MEM_IS_DEFINED(m+20, 10); /* BAD */
}
static struct test {
diff --git a/memcheck/tests/clientperm.c b/memcheck/tests/clientperm.c
index 27cb11a07..5edf4a82a 100644
--- a/memcheck/tests/clientperm.c
+++ b/memcheck/tests/clientperm.c
@@ -7,7 +7,7 @@
int main1 ( void )
{
int xxx, i;
- for (i = 0; i < 10; i++) VALGRIND_CHECK_DEFINED(xxx);
+ for (i = 0; i < 10; i++) VALGRIND_CHECK_VALUE_IS_DEFINED(xxx);
return 0;
}
@@ -17,10 +17,10 @@ int main ( void )
char* aa = calloc(100,1);
sum = 0;
- VALGRIND_CHECK_READABLE(aa,100);
+ VALGRIND_CHECK_MEM_IS_DEFINED(aa,100);
- m = VALGRIND_MAKE_WRITABLE( &aa[49], 1 );
- VALGRIND_CHECK_WRITABLE(aa,100);
+ m = VALGRIND_MAKE_MEM_UNDEFINED( &aa[49], 1 );
+ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(aa,100);
printf("m_na: returned value is %d\n", m );
diff --git a/memcheck/tests/custom_alloc.c b/memcheck/tests/custom_alloc.c
index 36c7952e5..38004cd28 100644
--- a/memcheck/tests/custom_alloc.c
+++ b/memcheck/tests/custom_alloc.c
@@ -22,7 +22,7 @@ void* get_superblock(void)
// program to be using it unless its handed out by custom_alloc()
// with redzones, better not to have it
- VALGRIND_MAKE_NOACCESS(p, SUPERBLOCK_SIZE);
+ VALGRIND_MAKE_MEM_NOACCESS(p, SUPERBLOCK_SIZE);
return p;
}
@@ -86,7 +86,7 @@ int main(void)
custom_free(array3); // mismatched free (ok without MALLOCLIKE)
make_leak();
- return array[0]; // use after free (ok without MALLOCLIKE/MAKE_NOACCESS)
+ return array[0]; // use after free (ok without MALLOCLIKE/MAKE_MEM_NOACCESS)
// (nb: initialised because is_zeroed==1 above)
// unfortunately not identified as being in a free'd
// block because the freeing of the block and shadow
diff --git a/memcheck/tests/mempool.c b/memcheck/tests/mempool.c
index f40c1b88a..f79484c46 100644
--- a/memcheck/tests/mempool.c
+++ b/memcheck/tests/mempool.c
@@ -40,7 +40,7 @@ pool *make_pool()
p->size = p->left = SUPERBLOCK_SIZE;
p->levels = NULL;
- VALGRIND_MAKE_NOACCESS(p->where, SUPERBLOCK_SIZE);
+ VALGRIND_MAKE_MEM_NOACCESS(p->where, SUPERBLOCK_SIZE);
return p;
}
@@ -66,7 +66,7 @@ void pop(pool *p)
level_list *l = p->levels;
p->levels = l->next;
VALGRIND_DESTROY_MEMPOOL(l->where);
- VALGRIND_MAKE_NOACCESS(l->where, p->where-l->where);
+ VALGRIND_MAKE_MEM_NOACCESS(l->where, p->where-l->where);
p->where = l->where;
if(USE_MMAP)
munmap(l, sizeof(level_list));
diff --git a/memcheck/tests/pointer-trace.c b/memcheck/tests/pointer-trace.c
index 7449bc0f6..b59de87ff 100644
--- a/memcheck/tests/pointer-trace.c
+++ b/memcheck/tests/pointer-trace.c
@@ -67,7 +67,7 @@ int main()
perror("trap 4 failed");
else {
munmap(map, 256*1024);
- VALGRIND_MAKE_READABLE(map, 256*1024); /* great big fat lie */
+ VALGRIND_MAKE_MEM_DEFINED(map, 256*1024); /* great big fat lie */
}
VALGRIND_DO_LEAK_CHECK;
diff --git a/memcheck/tests/post-syscall.c b/memcheck/tests/post-syscall.c
index 1563a7339..cab480051 100644
--- a/memcheck/tests/post-syscall.c
+++ b/memcheck/tests/post-syscall.c
@@ -31,8 +31,8 @@ int main()
if (ret != -1 || errno != EINTR) {
printf("FAILED: expected nanosleep to be interrupted\n");
} else {
- VALGRIND_CHECK_DEFINED(rem);
- printf("PASSED\n"); /* assuming CHECK_DEFINED doesn't print anything */
+ VALGRIND_CHECK_VALUE_IS_DEFINED(rem);
+ printf("PASSED\n"); /* assuming CHECK_VALUE_IS_DEFINED doesn't print anything */
}
return 0;
diff --git a/memcheck/tests/sh-mem.c b/memcheck/tests/sh-mem.c
index caa5b8563..3975091c8 100644
--- a/memcheck/tests/sh-mem.c
+++ b/memcheck/tests/sh-mem.c
@@ -41,7 +41,7 @@ U8 build(int size, U1 byte)
U8 mask = 0;
U8 shres;
U8 res = 0xffffffffffffffffULL, res2;
- VALGRIND_MAKE_WRITABLE(&res, 8);
+ VALGRIND_MAKE_MEM_UNDEFINED(&res, 8);
assert(1 == size || 2 == size || 4 == size || 8 == size);
for (i = 0; i < size; i++) {
@@ -56,7 +56,7 @@ U8 build(int size, U1 byte)
VALGRIND_GET_VBITS(&res, &shres, 8);
res2 = res;
- VALGRIND_MAKE_READABLE(&res2, 8); // avoid the 'undefined' warning
+ VALGRIND_MAKE_MEM_DEFINED(&res2, 8); // avoid the 'undefined' warning
assert(res2 == shres);
return res;
}
@@ -110,7 +110,7 @@ int main(void)
//
// which is useful for testing below.
undefA = calloc(1, 256); // one for each possible undefinedness value
- VALGRIND_MAKE_WRITABLE(undefA, 256);
+ VALGRIND_MAKE_MEM_UNDEFINED(undefA, 256);
for (i = 0; i < 256; i++) {
undefA[i] &= i;
}
@@ -150,8 +150,8 @@ int main(void)
/* the output of build() into a variable of type 'Ty'. */ \
U8 tmpDef = tmp; \
ITy undefN_ITyDef = undefN_ITy; \
- VALGRIND_MAKE_READABLE(&tmpDef, 8 ); \
- VALGRIND_MAKE_READABLE(&undefN_ITyDef, NNN); \
+ VALGRIND_MAKE_MEM_DEFINED(&tmpDef, 8 ); \
+ VALGRIND_MAKE_MEM_DEFINED(&undefN_ITyDef, NNN); \
assert(tmpDef == (U8)undefN_ITyDef); \
} \
\
diff --git a/memcheck/tests/x86/scalar.c b/memcheck/tests/x86/scalar.c
index b5810a7d0..e7bc3201d 100644
--- a/memcheck/tests/x86/scalar.c
+++ b/memcheck/tests/x86/scalar.c
@@ -821,7 +821,7 @@ int main(void)
ss.ss_sp = NULL;
ss.ss_flags = 0;
ss.ss_size = 0;
- VALGRIND_MAKE_NOACCESS(& ss, sizeof(struct our_sigaltstack));
+ VALGRIND_MAKE_MEM_NOACCESS(& ss, sizeof(struct our_sigaltstack));
GO(__NR_sigaltstack, "2s 2m");
SY(__NR_sigaltstack, x0+&ss, x0+&ss); SUCC;
}