aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorDavid 'Digit' Turner <digit@google.com>2014-07-04 10:56:28 +0200
committerDavid 'Digit' Turner <digit@google.com>2014-07-04 10:56:28 +0200
commit0e5ff1bd3073e3847ac8400ba46814878beb8605 (patch)
tree0a136c9faf8afda64b443163b7d35d9cca16aaaf /include
parentc22537fdf4bedb53daf10122ede32e718b9402b3 (diff)
downloadqemu-0e5ff1bd3073e3847ac8400ba46814878beb8605.tar.gz
memcheck: Remove feature entirely
This patch removes the memcheck feature from the Android emulator code base. This is for several reasons:

- Supporting the feature impacts many QEMU-specific emulation files in subtle ways that make it difficult to refactor this code to integrate upstream changes.
- The feature only works for ARM (32-bit), has no unit tests, and generates massive amounts of false-positive messages coming from the platform.
- Barely anyone uses it.

This feature might make a comeback in the future, but it will be under a different life-form that will have adapted to the new state of the code.

As a bonus, fix the build!

Change-Id: Idd19a3bc7923379cb1e82850f14499549b6a991b
Diffstat (limited to 'include')
-rw-r--r--include/exec/exec-all.h68
-rw-r--r--include/exec/softmmu_template.h164
2 files changed, 1 insertion, 231 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 7befcdd588..6939cb40f3 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -182,18 +182,6 @@ struct TranslationBlock {
struct TranslationBlock *jmp_next[2];
struct TranslationBlock *jmp_first;
uint32_t icount;
-
-#ifdef CONFIG_ANDROID_MEMCHECK
- /* Maps PCs in this translation block to corresponding PCs in guest address
- * space. The array is arranged in such way, that every even entry contains
- * PC in the translation block, followed by an odd entry that contains
- * guest PC corresponding to that PC in the translation block. This
- * arrangement is set by tcg_gen_code_common that initializes this array
- * when performing guest code translation. */
- uintptr_t* tpc2gpc;
- /* Number of pairs (pc_tb, pc_guest) in tpc2gpc array. */
- unsigned int tpc2gpc_pairs;
-#endif // CONFIG_ANDROID_MEMCHECK
};
#include "exec/spinlock.h"
@@ -235,67 +223,13 @@ static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
-#ifdef CONFIG_ANDROID_MEMCHECK
-/* Gets translated PC for a given (translated PC, guest PC) pair.
- * Return:
- * Translated PC, or NULL if pair index was too large.
- */
-static inline target_ulong
-tb_get_tb_pc(const TranslationBlock* tb, unsigned int pair)
-{
- return (tb->tpc2gpc != NULL && pair < tb->tpc2gpc_pairs) ?
- tb->tpc2gpc[pair * 2] : 0;
-}
-
-/* Gets guest PC for a given (translated PC, guest PC) pair.
- * Return:
- * Guest PC, or NULL if pair index was too large.
- */
-static inline target_ulong
-tb_get_guest_pc(const TranslationBlock* tb, unsigned int pair)
-{
- return (tb->tpc2gpc != NULL && pair < tb->tpc2gpc_pairs) ?
- tb->tpc2gpc[pair * 2 + 1] : 0;
-}
-
-/* Gets guest PC for a given translated PC.
- * Return:
- * Guest PC for a given translated PC, or NULL if there was no pair, matching
- * translated PC in tb's tpc2gpc array.
- */
-static inline target_ulong
-tb_search_guest_pc_from_tb_pc(const TranslationBlock* tb, target_ulong tb_pc)
-{
- if (tb->tpc2gpc != NULL && tb->tpc2gpc_pairs != 0) {
- unsigned int m_min = 0;
- unsigned int m_max = (tb->tpc2gpc_pairs - 1) << 1;
- /* Make sure that tb_pc is within TB array. */
- if (tb_pc < tb->tpc2gpc[0]) {
- return 0;
- }
- while (m_min <= m_max) {
- const unsigned int m = ((m_min + m_max) >> 1) & ~1;
- if (tb_pc < tb->tpc2gpc[m]) {
- m_max = m - 2;
- } else if (m == m_max || tb_pc < tb->tpc2gpc[m + 2]) {
- return tb->tpc2gpc[m + 1];
- } else {
- m_min = m + 2;
- }
- }
- return tb->tpc2gpc[m_max + 1];
- }
- return 0;
-}
-#endif // CONFIG_ANDROID_MEMCHECK
-
void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_link_phys(TranslationBlock *tb,
target_ulong phys_pc, target_ulong phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_page_fast0(hwaddr start, int len);
-
+
extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index 21d9701a54..5ddf5aec04 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -112,20 +112,6 @@
#endif
-#if defined(CONFIG_ANDROID_MEMCHECK) && !defined(SOFTMMU_CODE_ACCESS)
-/*
- * Support for memory access checker.
- * We need to instrument __ldx/__stx_mmu routines implemented in this file with
- * callbacks to access validation routines implemented by the memory checker.
- * Note that (at least for now) we don't do that instrumentation for memory
- * addressing the code (SOFTMMU_CODE_ACCESS controls that). Also, we don't want
- * to instrument code that is used by emulator itself (OUTSIDE_JIT controls
- * that).
- */
-#define CONFIG_ANDROID_MEMCHECK_MMU
-#include "android/qemu/memcheck/memcheck_api.h"
-#endif // CONFIG_ANDROID_MEMCHECK && !SOFTMMU_CODE_ACCESS
-
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
target_ulong addr,
@@ -168,9 +154,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- int invalidate_cache = 0;
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
@@ -203,16 +186,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- /* We only validate access to the guest's user space, for which
- * mmu_idx is set to 1. */
- if (memcheck_instrument_mmu && mmu_idx == 1 &&
- memcheck_validate_ld(addr, DATA_SIZE, (target_ulong)(retaddr - GETPC_ADJ))) {
- /* Memory read breaks page boundary. So, if required, we
- * must invalidate two caches in TLB. */
- invalidate_cache = 2;
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
target_ulong addr1, addr2;
DATA_TYPE res1, res2;
unsigned shift;
@@ -232,14 +205,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
return res;
}
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (DATA_SIZE == 1) {
- if (memcheck_instrument_mmu && mmu_idx == 1) {
- invalidate_cache = memcheck_validate_ld(addr, DATA_SIZE,
- (target_ulong)(retaddr + GETPC_ADJ));
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
@@ -254,20 +219,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
#else
res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (invalidate_cache) {
- /* Accessed memory is under memchecker control. We must invalidate
- * containing page(s) in order to make sure that next access to them
- * will invoke _ld/_st_mmu. */
- env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
- if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE)) {
- // Read crossed page boundaris. Invalidate second cache too.
- env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
return res;
}
@@ -285,9 +236,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- int invalidate_cache = 0;
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
@@ -320,16 +268,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- /* We only validate access to the guest's user space, for which
- * mmu_idx is set to 1. */
- if (memcheck_instrument_mmu && mmu_idx == 1 &&
- memcheck_validate_ld(addr, DATA_SIZE, (target_ulong)(retaddr - GETPC_ADJ))) {
- /* Memory read breaks page boundary. So, if required, we
- * must invalidate two caches in TLB. */
- invalidate_cache = 2;
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
target_ulong addr1, addr2;
DATA_TYPE res1, res2;
unsigned shift;
@@ -349,14 +287,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
return res;
}
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (DATA_SIZE == 1) {
- if (memcheck_instrument_mmu && mmu_idx == 1) {
- invalidate_cache = memcheck_validate_ld(addr, DATA_SIZE,
- (target_ulong)(retaddr + GETPC_ADJ));
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
@@ -367,20 +297,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (invalidate_cache) {
- /* Accessed memory is under memchecker control. We must invalidate
- * containing page(s) in order to make sure that next access to them
- * will invoke _ld/_st_mmu. */
- env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
- if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE)) {
- // Read crossed page boundaris. Invalidate second cache too.
- env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
return res;
}
#endif /* DATA_SIZE > 1 */
@@ -447,10 +363,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- int invalidate_cache = 0;
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
-
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -485,17 +397,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- /* We only validate access to the guest's user space, for which
- * mmu_idx is set to 1. */
- if (memcheck_instrument_mmu && mmu_idx == 1 &&
- memcheck_validate_st(addr, DATA_SIZE, (uint64_t)val,
- (target_ulong)(retaddr + GETPC_ADJ))) {
- /* Memory write breaks page boundary. So, if required, we
- * must invalidate two caches in TLB. */
- invalidate_cache = 2;
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
int i;
do_unaligned_access:
#ifdef ALIGNED_ONLY
@@ -514,17 +415,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
}
return;
}
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (DATA_SIZE == 1) {
- /* We only validate access to the guest's user space, for which
- * mmu_idx is set to 1. */
- if (memcheck_instrument_mmu && mmu_idx == 1) {
- invalidate_cache = memcheck_validate_st(addr, DATA_SIZE,
- (uint64_t)val,
- (target_ulong)(retaddr + GETPC_ADJ));
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
@@ -539,20 +429,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
#else
glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (invalidate_cache) {
- /* Accessed memory is under memchecker control. We must invalidate
- * containing page(s) in order to make sure that next access to them
- * will invoke _ld/_st_mmu. */
- env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
- if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE)) {
- // Write crossed page boundaris. Invalidate second cache too.
- env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
}
#if DATA_SIZE > 1
@@ -563,10 +439,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- int invalidate_cache = 0;
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
-
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -601,17 +473,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- /* We only validate access to the guest's user space, for which
- * mmu_idx is set to 1. */
- if (memcheck_instrument_mmu && mmu_idx == 1 &&
- memcheck_validate_st(addr, DATA_SIZE, (uint64_t)val,
- (target_ulong)(retaddr + GETPC_ADJ))) {
- /* Memory write breaks page boundary. So, if required, we
- * must invalidate two caches in TLB. */
- invalidate_cache = 2;
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
int i;
do_unaligned_access:
#ifdef ALIGNED_ONLY
@@ -630,17 +491,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
}
return;
}
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (DATA_SIZE == 1) {
- /* We only validate access to the guest's user space, for which
- * mmu_idx is set to 1. */
- if (memcheck_instrument_mmu && mmu_idx == 1) {
- invalidate_cache = memcheck_validate_st(addr, DATA_SIZE,
- (uint64_t)val,
- (target_ulong)(retaddr + GETPC_ADJ));
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
@@ -651,20 +501,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
-#ifdef CONFIG_ANDROID_MEMCHECK_MMU
- if (invalidate_cache) {
- /* Accessed memory is under memchecker control. We must invalidate
- * containing page(s) in order to make sure that next access to them
- * will invoke _ld/_st_mmu. */
- env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
- if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE)) {
- // Write crossed page boundaris. Invalidate second cache too.
- env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
- env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
- }
- }
-#endif // CONFIG_ANDROID_MEMCHECK_MMU
}
#endif /* DATA_SIZE > 1 */