author    Martyn Capewell <martyn.capewell@arm.com>    2019-01-24 16:40:18 +0000
committer Martyn Capewell <martyn.capewell@arm.com>    2019-01-31 10:51:05 +0000
commit    37abcf22c9083f0f8201455f94c17a8e6c2db908 (patch)
tree      ac5b0aff80d5b787d6d14d00d04ac40423ea4735 /test/aarch64
parent    4a6b62c4e28f60f2c551ca1b780f9c7773ac5069 (diff)
download  vixl-37abcf22c9083f0f8201455f94c17a8e6c2db908.tar.gz
Change some tests to speed up test.py
Reduce the number of iterations in the fjcvtzs test and split the decoder
and disassembler fuzz tests to make them execute in parallel.

Change-Id: I48a8b2a4ac4392d02d2f8d322121a0cad39daf71
Diffstat (limited to 'test/aarch64')
-rw-r--r--  test/aarch64/test-assembler-aarch64.cc    2
-rw-r--r--  test/aarch64/test-fuzz-aarch64.cc       126
2 files changed, 59 insertions, 69 deletions
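
The change replaces two monolithic random-fuzz loops with deterministic,
strided shards that a test harness can schedule in parallel. A minimal
standalone sketch of the pattern (hypothetical names, not VIXL code; the
real tests plug the decoder or disassembler in where Visit appears):

#include <cstdint>

static const int kShards = 16;
// Step chosen so all shards together sample ~1/100 of the space.
static const int kStep = 100 * kShards + 1;  // 1601

// Stand-in for the per-instruction work (decode or disassemble).
static void Visit(uint32_t instr) { (void)instr; }

// Each shard walks its own arithmetic progression through the 32-bit
// instruction space, so shards share no state and can run concurrently.
static void RunShard(int shard) {
  uint64_t start = static_cast<uint64_t>(shard) << 25;
  for (uint64_t i = start; i < (UINT64_C(1) << 32); i += kStep) {
    Visit(static_cast<uint32_t>(i));
  }
}

int main() {
  // In the patch each shard is a separate TEST, run in parallel by
  // test.py; here they run sequentially for illustration only.
  for (int shard = 0; shard < kShards; shard++) RunShard(shard);
  return 0;
}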
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index 5354d17b..45556081 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -14435,7 +14435,7 @@ TEST(fjcvtzs) {
// integers.
int first_exp_boundary = 52;
int second_exp_boundary = first_exp_boundary + 64;
- for (int exponent = 0; exponent < 2048; exponent++) {
+ for (int exponent = 0; exponent < 2048; exponent += 8) {
int e = exponent - 1023;
uint64_t expected = 0;
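
A note on scale: stepping the biased exponent by 8 cuts this loop from
2048 iterations to 256. The two interesting boundaries sit at e = 52 and
e = 52 + 64 = 116 (biased exponents 1075 and 1139), well inside the swept
range, so the coarser stride still exercises exponents on both sides of
each boundary even though it no longer lands on every value.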
diff --git a/test/aarch64/test-fuzz-aarch64.cc b/test/aarch64/test-fuzz-aarch64.cc
index b9f228bc..1273ed7b 100644
--- a/test/aarch64/test-fuzz-aarch64.cc
+++ b/test/aarch64/test-fuzz-aarch64.cc
@@ -25,6 +25,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
+#include <string>
#include "test-runner.h"
@@ -37,84 +38,73 @@
namespace vixl {
namespace aarch64 {
-
-TEST(decoder) {
- // Feed noise into the decoder to check that it doesn't crash.
- // 43 million = ~1% of the instruction space.
- static const int instruction_count = 43 * 1024 * 1024;
-
- uint16_t seed[3] = {1, 2, 3};
- seed48(seed);
-
+static void FuzzHelper(std::string mode, int step_size, int offset, int shift) {
Decoder decoder;
+ PrintDisassembler disasm(stdout);
Instruction buffer[kInstructionSize];
- for (int i = 0; i < instruction_count; i++) {
- uint32_t instr = static_cast<uint32_t>(mrand48());
- buffer->SetInstructionBits(instr);
- decoder.Decode(buffer);
- }
-}
-
-TEST(disasm) {
- // Feed noise into the disassembler to check that it doesn't crash.
- // 9 million = ~0.2% of the instruction space.
- static const int instruction_count = 9 * 1024 * 1024;
-
- uint16_t seed[3] = {42, 43, 44};
- seed48(seed);
-
- Decoder decoder;
- Disassembler disasm;
- Instruction buffer[kInstructionSize];
-
- decoder.AppendVisitor(&disasm);
- for (int i = 0; i < instruction_count; i++) {
- uint32_t instr = static_cast<uint32_t>(mrand48());
- buffer->SetInstructionBits(instr);
- decoder.Decode(buffer);
+ if (mode == "disasm") {
+ decoder.AppendVisitor(&disasm);
+ } else {
+ VIXL_CHECK(mode == "decoder");
}
-}
-#if 0
-// These tests are commented out as they take a long time to run, causing the
-// test script to timeout. After enabling them, they are best run manually:
-//
-// test-runner AARCH64_FUZZ_decoder_pedantic
-// test-runner AARCH64_FUZZ_disasm_pedantic
-//
-
-TEST(decoder_pedantic) {
- // Test the entire instruction space.
- Decoder decoder;
- Instruction buffer[kInstructionSize];
-
- for (uint64_t i = 0; i < (UINT64_C(1) << 32); i++) {
- if ((i & 0xffffff) == 0) {
- fprintf(stderr, "0x%08" PRIx32 "\n", static_cast<uint32_t>(i));
- }
+ for (uint64_t i = offset << shift; i < (UINT64_C(1) << 32); i += step_size) {
buffer->SetInstructionBits(static_cast<uint32_t>(i));
decoder.Decode(buffer);
}
}
-TEST(disasm_pedantic) {
- // Test the entire instruction space. Warning: takes about 30 minutes on a
- // high-end CPU.
- Decoder decoder;
- PrintDisassembler disasm(stdout);
- Instruction buffer[kInstructionSize];
-
- decoder.AppendVisitor(&disasm);
- for (uint64_t i = 0; i < (UINT64_C(1) << 32); i++) {
- if ((i & 0xffff) == 0) {
- fprintf(stderr, "0x%08" PRIx32 "\n", static_cast<uint32_t>(i));
- }
- buffer->SetInstructionBits(static_cast<uint32_t>(i));
- decoder.Decode(buffer);
- }
-}
-#endif
+// Number of shards used to split fuzz tests. This value isn't used in the macro
+// below, so if you change this, ensure more FUZZ_SHARD instances are
+// instantiated.
+static const int kShardCount = 16;
+
+// Test approximately 1% of the instruction space for the decoder, and 0.2% for
+// the disassembler. Multiply the step size by the number of shards issued.
+static const int kDecoderStep = 100 * kShardCount + 1;
+static const int kDisasmStep = 500 * kShardCount + 1;
+
+// Shift the offset argument into the top-level opcode bits, which helps to
+// spread the fuzz coverage across instruction classes.
+static const int kOpFieldShift = 25;
+
+#define FUZZ_SHARD(mode, step, i, shift) \
+ TEST(mode##_##i) { FuzzHelper(#mode, step, i, shift); }
+
+FUZZ_SHARD(decoder, kDecoderStep, 0, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 1, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 2, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 3, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 4, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 5, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 6, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 7, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 8, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 9, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 10, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 11, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 12, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 13, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 14, kOpFieldShift)
+FUZZ_SHARD(decoder, kDecoderStep, 15, kOpFieldShift)
+
+FUZZ_SHARD(disasm, kDisasmStep, 0, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 1, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 2, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 3, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 4, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 5, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 6, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 7, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 8, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 9, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 10, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 11, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 12, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 13, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 14, kOpFieldShift)
+FUZZ_SHARD(disasm, kDisasmStep, 15, kOpFieldShift)
} // namespace aarch64
} // namespace vixl
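
For reference, the coverage works out from the constants above:
kDecoderStep is 100 * 16 + 1 = 1601, so one shard visits about
2^32 / 1601, roughly 2.7 million instructions, and the 16 shards together
cover about 43 million, the ~1% of the instruction space the old single
decoder test targeted. Likewise kDisasmStep is 8001, giving roughly 537
thousand instructions per shard and ~8.6 million (~0.2%) in total. The
FUZZ_SHARD macro is plain token pasting; for example (with TEST being the
test-runner macro this file already uses):

// FUZZ_SHARD(decoder, kDecoderStep, 3, kOpFieldShift) expands to:
TEST(decoder_3) { FuzzHelper("decoder", kDecoderStep, 3, kOpFieldShift); }

Since shard i starts at i << 25 (i * 0x2000000), each shard begins in a
different top-level opcode region before striding across the whole space.
Following the AARCH64_FUZZ_ naming visible in the removed comment, the
resulting tests should be runnable individually, for example as
AARCH64_FUZZ_decoder_0, which is what lets test.py execute the shards in
parallel.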