author     Andrea Fioraldi <andreafioraldi@gmail.com>   2019-09-02 18:49:43 +0200
committer  Andrea Fioraldi <andreafioraldi@gmail.com>   2019-09-02 18:49:43 +0200
commit     b24639d0113e15933e749ea0f96abe3f25a134a0 (patch)
tree       4272020625c80c0d6982d3787bebc573c0da01b8 /libdislocator
parent     2ae4ca91b48407add0e940ee13bd8b385e319a7a (diff)
download   AFLplusplus-b24639d0113e15933e749ea0f96abe3f25a134a0.tar.gz
run code formatter
Diffstat (limited to 'libdislocator')
-rw-r--r--  libdislocator/libdislocator.so.c  |  63
1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c
index 71620b17..5104fed4 100644
--- a/libdislocator/libdislocator.so.c
+++ b/libdislocator/libdislocator.so.c
@@ -38,23 +38,35 @@
/* Error / message handling: */
-#define DEBUGF(_x...) do { \
- if (alloc_verbose) { \
- if (++call_depth == 1) { \
+#define DEBUGF(_x...) \
+ do { \
+ \
+ if (alloc_verbose) { \
+ \
+ if (++call_depth == 1) { \
+ \
fprintf(stderr, "[AFL] " _x); \
- fprintf(stderr, "\n"); \
- } \
- call_depth--; \
- } \
+ fprintf(stderr, "\n"); \
+ \
+ } \
+ call_depth--; \
+ \
+ } \
+ \
} while (0)
-#define FATAL(_x...) do { \
- if (++call_depth == 1) { \
+#define FATAL(_x...) \
+ do { \
+ \
+ if (++call_depth == 1) { \
+ \
fprintf(stderr, "*** [AFL] " _x); \
- fprintf(stderr, " ***\n"); \
- abort(); \
- } \
- call_depth--; \
+ fprintf(stderr, " ***\n"); \
+ abort(); \
+ \
+ } \
+ call_depth--; \
+ \
} while (0)
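Editor's note: the call_depth counter in both macros exists because this library interposes the allocator itself; if fprintf() ever allocated and the allocator logged in turn, DEBUGF()/FATAL() would recurse. A minimal standalone sketch of that guard, not taken from the patch (it uses the standard __VA_ARGS__ form rather than the GNU "_x..." form used in the file):

#include <stdio.h>

static __thread unsigned call_depth;  /* per-thread: logging must never recurse */
static int alloc_verbose = 1;

#define DEBUGF(...)                              \
  do {                                           \
    if (alloc_verbose) {                         \
      if (++call_depth == 1) {                   \
        /* only the outermost call prints */     \
        fprintf(stderr, "[AFL] " __VA_ARGS__);   \
        fprintf(stderr, "\n");                   \
      }                                          \
      call_depth--;                              \
    }                                            \
  } while (0)

int main(void) {
  DEBUGF("allocated %d bytes", 64);  /* prints once, even if re-entered */
  return 0;
}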
/* Macro to count the number of pages needed to store a buffer: */
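The macro that comment introduces lies outside this hunk's context. One common way to express the computation it describes, with an illustrative name and a page size taken from sysconf() (a sketch, not the file's definition):

#include <stdio.h>
#include <unistd.h>

static size_t page_size;

/* round a byte length up to a whole number of pages */
#define PG_COUNT(_l) (((_l) + (page_size - 1)) / page_size)

int main(void) {
  page_size = (size_t)sysconf(_SC_PAGESIZE);
  printf("%zu byte(s) -> %zu page(s)\n", (size_t)5000, PG_COUNT((size_t)5000));
  return 0;
}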
@@ -63,7 +75,7 @@
/* Canary & clobber bytes: */
-#define ALLOC_CANARY 0xAACCAACC
+#define ALLOC_CANARY 0xAACCAACC
#define ALLOC_CLOBBER 0xCC
#define PTR_C(_p) (((u32*)(_p))[-1])
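PTR_C() reads a 32-bit canary word stored immediately below the pointer handed to the caller; a corrupted canary on free() indicates an underflow or a bogus pointer. A standalone sketch of that "metadata just below the user pointer" idea (the layout is simplified, the real allocator also tracks the length there):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t u32;

#define ALLOC_CANARY 0xAACCAACC
#define PTR_C(_p) (((u32*)(_p))[-1])  /* canary sits in the word before _p */

int main(void) {
  /* reserve one extra word ahead of the user region for the canary */
  u32* raw = malloc(sizeof(u32) + 16);
  if (!raw) return 1;

  void* user = raw + 1;        /* pointer handed to the "user" */
  PTR_C(user) = ALLOC_CANARY;  /* stamp the canary below it    */

  /* a free() wrapper would verify the canary before retiring the block */
  if (PTR_C(user) != ALLOC_CANARY) fprintf(stderr, "*** corrupted canary ***\n");

  free(raw);
  return 0;
}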
@@ -73,14 +85,13 @@
static u32 max_mem = MAX_ALLOC; /* Max heap usage to permit */
static u8 alloc_verbose, /* Additional debug messages */
- hard_fail, /* abort() when max_mem exceeded? */
- no_calloc_over; /* abort() on calloc() overflows? */
+ hard_fail, /* abort() when max_mem exceeded? */
+ no_calloc_over; /* abort() on calloc() overflows? */
static __thread size_t total_mem; /* Currently allocated mem */
static __thread u32 call_depth; /* To avoid recursion via fprintf() */
-
/* This is the main alloc function. It allocates one page more than necessary,
sets that tailing page to PROT_NONE, and then increments the return address
so that it is right-aligned to that boundary. Since it always uses mmap(),
@@ -90,14 +101,11 @@ static void* __dislocator_alloc(size_t len) {
void* ret;
-
if (total_mem + len > max_mem || total_mem + len < total_mem) {
- if (hard_fail)
- FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
+ if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
- DEBUGF("total allocs exceed %u MB, returning NULL",
- max_mem / 1024 / 1024);
+ DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024);
return NULL;
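The rest of __dislocator_alloc(), outside this hunk, implements the guard-page scheme described in the comment above: map one page more than needed, make the last page inaccessible, and hand back a pointer right-aligned against it so any overflow faults immediately. A simplified standalone sketch of that technique (no canary or length bookkeeping; names are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map len bytes plus a trailing PROT_NONE guard page; return a pointer
   pushed up against the guard so off-by-one writes fault at once. */
static void* guarded_alloc(size_t len) {

  size_t page  = (size_t)sysconf(_SC_PAGESIZE);
  size_t pages = (len + page - 1) / page;   /* pages needed for the data */
  size_t total = (pages + 1) * page;        /* plus one guard page       */

  char* base = mmap(NULL, total, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;

  /* the last page becomes inaccessible */
  if (mprotect(base + pages * page, page, PROT_NONE)) return NULL;

  /* right-align the user region against the guard page */
  return base + pages * page - len;
}

int main(void) {
  char* p = guarded_alloc(100);
  if (!p) return 1;
  memset(p, 0, 100);   /* fine */
  /* p[100] = 1; */    /* would touch the guard page and SIGSEGV */
  printf("allocated at %p\n", (void*)p);
  return 0;
}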
@@ -142,7 +150,6 @@ static void* __dislocator_alloc(size_t len) {
}
-
/* The "user-facing" wrapper for calloc(). This just checks for overflows and
displays debug messages if requested. */
@@ -157,8 +164,11 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
if (elem_cnt && len / elem_cnt != elem_len) {
if (no_calloc_over) {
- DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, elem_cnt);
+
+ DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
+ elem_cnt);
return NULL;
+
}
FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);
@@ -174,7 +184,6 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
}
-
/* The wrapper for malloc(). Roughly the same, also clobbers the returned
memory (unlike calloc(), malloc() is not guaranteed to return zeroed
memory). */
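"Clobbering" here means filling the fresh allocation with the fixed junk byte ALLOC_CLOBBER (0xCC above), so code that wrongly assumes malloc() returns zeroed memory misbehaves deterministically. A sketch of just that step, using plain malloc() as a stand-in for the mmap-based allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALLOC_CLOBBER 0xCC

/* poison a fresh allocation so "uninitialized" reads see a recognizable
   pattern instead of happening to be zero */
static void* clobbered_malloc(size_t len) {
  void* ret = malloc(len);
  if (ret && len) memset(ret, ALLOC_CLOBBER, len);
  return ret;
}

int main(void) {
  unsigned char* p = clobbered_malloc(4);
  if (p) printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);  /* cc cc cc cc */
  free(p);
  return 0;
}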
@@ -193,7 +202,6 @@ void* malloc(size_t len) {
}
-
/* The wrapper for free(). This simply marks the entire region as PROT_NONE.
If the region is already freed, the code will segfault during the attempt to
read the canary. Not very graceful, but works, right? */
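What that comment describes: instead of returning memory to an allocator, free() makes the whole region inaccessible, so any use-after-free faults on the very next touch. A simplified, page-granular standalone sketch (no canary or length lookup):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {

  size_t page = (size_t)sysconf(_SC_PAGESIZE);

  char* p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;

  p[0] = 42;                      /* valid use             */
  mprotect(p, page, PROT_NONE);   /* "free": lock it away  */

  /* p[0] = 43; */                /* would now SIGSEGV     */
  printf("region retired at %p\n", (void*)p);
  return 0;
}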
@@ -224,7 +232,6 @@ void free(void* ptr) {
}
-
/* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
move data, and then free (aka mprotect()) the original one. */
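So realloc() is allocate-copy-retire rather than in-place growth. A simplified sketch of that flow, with plain malloc()/free() standing in for the guarded allocator and the caller supplying the old length (the real library reads it from the metadata stored below the pointer):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* allocate new, copy the smaller of the two sizes, retire the old block */
static void* realloc_by_copy(void* old, size_t old_len, size_t new_len) {
  void* ret = malloc(new_len);       /* stand-in for the guarded allocator      */
  if (!ret) return NULL;
  if (old) {
    memcpy(ret, old, old_len < new_len ? old_len : new_len);
    free(old);                       /* real code would mprotect() it PROT_NONE */
  }
  return ret;
}

int main(void) {
  char* p = realloc_by_copy(NULL, 0, 4);
  if (!p) return 1;
  memcpy(p, "abc", 4);
  p = realloc_by_copy(p, 4, 16);
  if (!p) return 1;
  printf("%s\n", p);
  free(p);
  return 0;
}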
@@ -249,7 +256,6 @@ void* realloc(void* ptr, size_t len) {
}
-
__attribute__((constructor)) void __dislocator_init(void) {
u8* tmp = getenv("AFL_LD_LIMIT_MB");
@@ -266,3 +272,4 @@ __attribute__((constructor)) void __dislocator_init(void) {
no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
}
+
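For completeness: the constructor shown in the last two hunks is driven entirely by environment variables; the diff context only shows AFL_LD_LIMIT_MB being read and AFL_LD_NO_CALLOC_OVER being assigned. A rough standalone sketch of that pattern, converting a megabyte limit into a byte cap and reading a boolean switch (defaults and helper names are illustrative, not the file's values):

#include <stdio.h>
#include <stdlib.h>

static size_t max_mem = (size_t)1 << 30;  /* illustrative default: 1 GiB */
static int    no_calloc_over;

__attribute__((constructor)) static void init_from_env(void) {
  char* tmp = getenv("AFL_LD_LIMIT_MB");
  if (tmp) max_mem = (size_t)atoi(tmp) * 1024 * 1024;   /* MB -> bytes */
  no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
}

int main(void) {
  printf("limit: %zu bytes, no_calloc_over: %d\n", max_mem, no_calloc_over);
  return 0;
}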