 configure.py              |   1 +
 include/cpuinfo.h         |   5 ++
 src/arm/cache.c           |  22 +++
 src/arm/linux/cpuinfo.c   |  38 ++++-
 src/arm/uarch.c           |  10 ++
 test/cpuinfo/scaleway.log |  17 ++
 test/scaleway.cc          | 340 ++++++++++++++++
 tools/cpu-info.c          |   6 ++
 8 files changed, 436 insertions(+), 3 deletions(-)
diff --git a/configure.py b/configure.py
index 98885b8..bf5f963 100755
--- a/configure.py
+++ b/configure.py
@@ -78,6 +78,7 @@ def main(args):
if (build.target.is_arm or build.target.is_arm64) and build.target.is_linux:
build.unittest("jetson-tx1-test", build.cxx("jetson-tx1.cc"))
build.unittest("nexus9-test", build.cxx("nexus9.cc"))
+ build.unittest("scaleway-test", build.cxx("scaleway.cc"))
return build
diff --git a/include/cpuinfo.h b/include/cpuinfo.h
index f1f1bf9..c8e339c 100644
--- a/include/cpuinfo.h
+++ b/include/cpuinfo.h
@@ -330,6 +330,8 @@ enum cpuinfo_vendor {
* Processors are designed by Centaur Technology, a subsidiary of VIA Technologies.
*/
cpuinfo_vendor_via = 11,
+ /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */
+ cpuinfo_vendor_cavium = 12,
/* Active vendors of embedded CPUs */
@@ -559,6 +561,9 @@ enum cpuinfo_uarch {
cpuinfo_uarch_twister = 0x00700103,
/** Apple A10 processor. */
cpuinfo_uarch_hurricane = 0x00700104,
+
+ /** Cavium ThunderX. */
+ cpuinfo_uarch_thunderx = 0x00800100,
};
struct cpuinfo_topology {
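
The new constant follows the encoding already visible in this enum: the upper bits of a cpuinfo_uarch value appear to group microarchitectures by vendor, so the Apple entries above share an 0x007001xx block and cpuinfo_uarch_thunderx opens an 0x008001xx block for Cavium. A minimal sketch of that grouping, with a hypothetical uarch_vendor_block() helper that is not a cpuinfo API:

    #include <assert.h>
    #include <stdint.h>

    /* Assumption for illustration: the high 16 bits of a cpuinfo_uarch
     * value select the vendor block. */
    static uint32_t uarch_vendor_block(uint32_t uarch) {
        return uarch >> 16;
    }

    int main(void) {
        assert(uarch_vendor_block(0x00700104) == 0x0070); /* hurricane (Apple) */
        assert(uarch_vendor_block(0x00800100) == 0x0080); /* thunderx (Cavium) */
        return 0;
    }
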
diff --git a/src/arm/cache.c b/src/arm/cache.c
index 5615d63..486d515 100644
--- a/src/arm/cache.c
+++ b/src/arm/cache.c
@@ -584,6 +584,28 @@ void cpuinfo_arm_decode_cache(
.line_size = 64
};
break;
+ case cpuinfo_uarch_thunderx:
+ /*
+ * "78K-Icache and 32K-D cache per core, 16 MB shared L2 cache" [1]
+ *
+ * [1] https://www.cavium.com/pdfFiles/ThunderX_CP_PB_Rev1.pdf
+ */
+ *l1i = (struct cpuinfo_cache) {
+ .size = 78 * 1024,
+ .associativity = 4 /* assumption */,
+ .line_size = 64 /* assumption */
+ };
+ *l1d = (struct cpuinfo_cache) {
+ .size = 32 * 1024,
+ .associativity = 4 /* assumption */,
+ .line_size = 64 /* assumption */
+ };
+ *l2 = (struct cpuinfo_cache) {
+ .size = 16 * 1024 * 1024,
+ .associativity = 8 /* assumption */,
+ .line_size = 64 /* assumption */
+ };
+ break;
case cpuinfo_uarch_cortex_a12:
case cpuinfo_uarch_cortex_a17:
case cpuinfo_uarch_cortex_a32:
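
cpuinfo stores only size, associativity, and line size in these hardcoded descriptors; the per-instance set counts asserted by the new test below (312 for L1I, 128 for L1D, 32768 for L2) follow as size / (associativity * line_size). A minimal sketch of that derivation, using a hypothetical cache_sets() helper rather than cpuinfo's internals:

    #include <assert.h>
    #include <stdint.h>

    /* sets = size / (associativity * line_size) */
    static uint32_t cache_sets(uint32_t size, uint32_t associativity, uint32_t line_size) {
        return size / (associativity * line_size);
    }

    int main(void) {
        assert(cache_sets(78 * 1024, 4, 64) == 312);          /* L1I */
        assert(cache_sets(32 * 1024, 4, 64) == 128);          /* L1D */
        assert(cache_sets(16 * 1024 * 1024, 8, 64) == 32768); /* L2  */
        return 0;
    }
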
diff --git a/src/arm/linux/cpuinfo.c b/src/arm/linux/cpuinfo.c
index 8702ff3..f1ac00b 100644
--- a/src/arm/linux/cpuinfo.c
+++ b/src/arm/linux/cpuinfo.c
@@ -174,6 +174,14 @@ static void parse_features(
#elif CPUINFO_ARCH_ARM64
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_SHA2;
#endif
+ } else if (memcmp(feature_start, "fphp", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_FPHP;
+ #endif
+ } else if (memcmp(feature_start, "fcma", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_FCMA;
+ #endif
#if CPUINFO_ARCH_ARM
} else if (memcmp(feature_start, "half", feature_length) == 0) {
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_HALF;
@@ -203,6 +211,18 @@ static void parse_features(
#elif CPUINFO_ARCH_ARM64
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_CRC32;
#endif
+ } else if (memcmp(feature_start, "cpuid", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_CPUID;
+ #endif
+ } else if (memcmp(feature_start, "jscvt", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_JSCVT;
+ #endif
+ } else if (memcmp(feature_start, "lrcpc", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_LRCPC;
+ #endif
#if CPUINFO_ARCH_ARM
} else if (memcmp(feature_start, "thumb", feature_length) == 0) {
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_THUMB;
@@ -237,6 +257,14 @@ static void parse_features(
case 7:
if (memcmp(feature_start, "evtstrm", feature_length) == 0) {
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_EVTSTRM;
+ } else if (memcmp(feature_start, "atomics", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_ATOMICS;
+ #endif
+ } else if (memcmp(feature_start, "asimdhp", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_ASIMDHP;
+ #endif
#if CPUINFO_ARCH_ARM
} else if (memcmp(feature_start, "thumbee", feature_length) == 0) {
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_THUMBEE;
@@ -245,17 +273,21 @@ static void parse_features(
goto unexpected;
}
break;
-#if CPUINFO_ARCH_ARM
case 8:
- if (memcmp(feature_start, "fastmult", feature_length) == 0) {
+ if (memcmp(feature_start, "asimdrdm", feature_length) == 0) {
+ #if CPUINFO_ARCH_ARM64
+ proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_ASIMDRDM;
+ #endif
+#if CPUINFO_ARCH_ARM
+ } else if (memcmp(feature_start, "fastmult", feature_length) == 0) {
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_FASTMULT;
} else if (memcmp(feature_start, "vfpv3d16", feature_length) == 0) {
proc_cpuinfo->features |= PROC_CPUINFO_FEATURE_VFPV3D16;
+#endif /* CPUINFO_ARCH_ARM */
} else {
goto unexpected;
}
break;
-#endif /* CPUINFO_ARCH_ARM */
default:
unexpected:
cpuinfo_log_warning("unexpected /proc/cpuinfo features %.*s is ignored",
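
parse_features() buckets each token of the "Features :" line by length before comparing bytes, so every memcmp() above runs only against candidates of exactly that length; that is why fphp/fcma land in case 4, cpuid/jscvt/lrcpc in case 5, atomics/asimdhp in case 7, and asimdrdm in case 8, whose ARM-only guard had to move inside the case now that 64-bit ARM also has an 8-character feature. A standalone sketch of that dispatch shape (match_feature() and its printouts are illustrative placeholders, not library code):

    #include <stdio.h>
    #include <string.h>

    static void match_feature(const char* start, size_t length) {
        /* Dispatch on token length first, then compare bytes. */
        switch (length) {
            case 4:
                if (memcmp(start, "fphp", length) == 0) { puts("fphp"); return; }
                if (memcmp(start, "fcma", length) == 0) { puts("fcma"); return; }
                break;
            case 7:
                if (memcmp(start, "atomics", length) == 0) { puts("atomics"); return; }
                break;
            case 8:
                if (memcmp(start, "asimdrdm", length) == 0) { puts("asimdrdm"); return; }
                break;
        }
        printf("unexpected feature %.*s ignored\n", (int) length, start);
    }

    int main(void) {
        const char line[] = "fphp fcma atomics asimdrdm";
        for (const char* p = line; *p != '\0';) {
            const char* end = strchr(p, ' ');
            size_t length = end ? (size_t) (end - p) : strlen(p);
            match_feature(p, length);
            p += length;
            while (*p == ' ') p++;
        }
        return 0;
    }
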
diff --git a/src/arm/uarch.c b/src/arm/uarch.c
index c0ef6d7..ea8aac6 100644
--- a/src/arm/uarch.c
+++ b/src/arm/uarch.c
@@ -74,6 +74,16 @@ void cpuinfo_arm_decode_vendor_uarch(
}
}
break;
+ case 'C':
+ *vendor = cpuinfo_vendor_cavium;
+ switch (cpu_part) {
+ case 0x0A1:
+ *uarch = cpuinfo_uarch_thunderx;
+ break;
+ default:
+ cpuinfo_log_warning("unknown Cavium CPU part 0x%03"PRIx32" ignored", cpu_part);
+ }
+ break;
#if CPUINFO_ARCH_ARM
case 'i':
*vendor = cpuinfo_vendor_intel;
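
The character tested by this switch is the MIDR implementer byte: "CPU implementer" 0x43 in the scaleway.log below is ASCII 'C' (Cavium), and "CPU part" 0x0a1 matches the new ThunderX case. A sketch of the architecturally defined MIDR_EL1 field layout, with a hypothetical decode_midr() helper (cpuinfo itself takes these fields from /proc/cpuinfo rather than unpacking a raw register here):

    #include <assert.h>
    #include <stdint.h>

    struct midr_fields {
        uint8_t  implementer; /* bits 31:24 */
        uint8_t  variant;     /* bits 23:20 */
        uint16_t part;        /* bits 15:4  */
        uint8_t  revision;    /* bits 3:0   */
    };

    static struct midr_fields decode_midr(uint32_t midr) {
        return (struct midr_fields) {
            .implementer = (uint8_t) (midr >> 24),
            .variant     = (uint8_t) ((midr >> 20) & 0xF),
            .part        = (uint16_t) ((midr >> 4) & 0xFFF),
            .revision    = (uint8_t) (midr & 0xF),
        };
    }

    int main(void) {
        /* A MIDR value consistent with test/cpuinfo/scaleway.log:
         * implementer 0x43 ('C'), variant 0x1, part 0x0a1, revision 1. */
        struct midr_fields f = decode_midr(0x431F0A11);
        assert(f.implementer == 'C');
        assert(f.part == 0x0A1);
        return 0;
    }
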
diff --git a/test/cpuinfo/scaleway.log b/test/cpuinfo/scaleway.log
new file mode 100644
index 0000000..7d7cb8e
--- /dev/null
+++ b/test/cpuinfo/scaleway.log
@@ -0,0 +1,17 @@
+processor : 0
+BogoMIPS : 200.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0a1
+CPU revision : 1
+
+processor : 1
+BogoMIPS : 200.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0a1
+CPU revision : 1
diff --git a/test/scaleway.cc b/test/scaleway.cc
new file mode 100644
index 0000000..1e1acc9
--- /dev/null
+++ b/test/scaleway.cc
@@ -0,0 +1,340 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+#include <cpuinfo-mock.h>
+
+
+TEST(PROCESSORS, count) {
+ ASSERT_EQ(2, cpuinfo_processors_count);
+}
+
+TEST(PROCESSORS, non_null) {
+ ASSERT_TRUE(cpuinfo_processors);
+}
+
+TEST(PROCESSORS, vendor_cavium) {
+ for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ ASSERT_EQ(cpuinfo_vendor_cavium, cpuinfo_processors[i].vendor);
+ }
+}
+
+TEST(PROCESSORS, uarch_thunderx) {
+ for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ ASSERT_EQ(cpuinfo_uarch_thunderx, cpuinfo_processors[i].uarch);
+ }
+}
+
+#if CPUINFO_ARCH_ARM
+TEST(ISA, thumb) {
+ ASSERT_TRUE(cpuinfo_isa.thumb);
+}
+
+TEST(ISA, thumb2) {
+ ASSERT_TRUE(cpuinfo_isa.thumb2);
+}
+
+TEST(ISA, thumbee) {
+ ASSERT_FALSE(cpuinfo_isa.thumbee);
+}
+
+TEST(ISA, jazelle) {
+ ASSERT_FALSE(cpuinfo_isa.jazelle);
+}
+
+TEST(ISA, armv5e) {
+ ASSERT_TRUE(cpuinfo_isa.armv5e);
+}
+
+TEST(ISA, armv6) {
+ ASSERT_TRUE(cpuinfo_isa.armv6);
+}
+
+TEST(ISA, armv6k) {
+ ASSERT_TRUE(cpuinfo_isa.armv6k);
+}
+
+TEST(ISA, armv7) {
+ ASSERT_TRUE(cpuinfo_isa.armv7);
+}
+
+TEST(ISA, armv7mp) {
+ ASSERT_TRUE(cpuinfo_isa.armv7mp);
+}
+
+TEST(ISA, idiv) {
+ ASSERT_TRUE(cpuinfo_isa.idiv);
+}
+
+TEST(ISA, vfpv2) {
+ ASSERT_FALSE(cpuinfo_isa.vfpv2);
+}
+
+TEST(ISA, vfpv3) {
+ ASSERT_TRUE(cpuinfo_isa.vfpv3);
+}
+
+TEST(ISA, d32) {
+ ASSERT_TRUE(cpuinfo_isa.d32);
+}
+
+TEST(ISA, fp16) {
+ ASSERT_TRUE(cpuinfo_isa.fp16);
+}
+
+TEST(ISA, fma) {
+ ASSERT_TRUE(cpuinfo_isa.fma);
+}
+
+TEST(ISA, wmmx) {
+ ASSERT_FALSE(cpuinfo_isa.wmmx);
+}
+
+TEST(ISA, wmmx2) {
+ ASSERT_FALSE(cpuinfo_isa.wmmx2);
+}
+
+TEST(ISA, neon) {
+ ASSERT_TRUE(cpuinfo_isa.neon);
+}
+#endif /* CPUINFO_ARCH_ARM */
+
+TEST(ISA, aes) {
+ ASSERT_TRUE(cpuinfo_isa.aes);
+}
+
+TEST(ISA, sha1) {
+ ASSERT_TRUE(cpuinfo_isa.sha1);
+}
+
+TEST(ISA, sha2) {
+ ASSERT_TRUE(cpuinfo_isa.sha2);
+}
+
+TEST(ISA, pmull) {
+ ASSERT_TRUE(cpuinfo_isa.pmull);
+}
+
+TEST(ISA, crc32) {
+ ASSERT_TRUE(cpuinfo_isa.crc32);
+}
+
+#if CPUINFO_ARCH_ARM64
+TEST(ISA, atomics) {
+ ASSERT_TRUE(cpuinfo_isa.atomics);
+}
+
+TEST(ISA, rdm) {
+ ASSERT_FALSE(cpuinfo_isa.rdm);
+}
+
+TEST(ISA, fp16arith) {
+ ASSERT_FALSE(cpuinfo_isa.fp16arith);
+}
+
+TEST(ISA, jscvt) {
+ ASSERT_FALSE(cpuinfo_isa.jscvt);
+}
+
+TEST(ISA, fcma) {
+ ASSERT_FALSE(cpuinfo_isa.fcma);
+}
+#endif /* CPUINFO_ARCH_ARM64 */
+
+TEST(L1I, count) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ ASSERT_EQ(2, l1i.count);
+}
+
+TEST(L1I, non_null) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ ASSERT_TRUE(l1i.instances);
+}
+
+TEST(L1I, size) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(78 * 1024, l1i.instances[k].size);
+ }
+}
+
+TEST(L1I, associativity) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(4, l1i.instances[k].associativity);
+ }
+}
+
+TEST(L1I, sets) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(312, l1i.instances[k].sets);
+ }
+}
+
+TEST(L1I, partitions) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(1, l1i.instances[k].partitions);
+ }
+}
+
+TEST(L1I, line_size) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(64, l1i.instances[k].line_size);
+ }
+}
+
+TEST(L1I, flags) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(0, l1i.instances[k].flags);
+ }
+}
+
+TEST(L1I, processors) {
+ cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
+ for (uint32_t k = 0; k < l1i.count; k++) {
+ ASSERT_EQ(k, l1i.instances[k].thread_start);
+ ASSERT_EQ(1, l1i.instances[k].thread_count);
+ }
+}
+
+TEST(L1D, count) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ ASSERT_EQ(2, l1d.count);
+}
+
+TEST(L1D, non_null) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ ASSERT_TRUE(l1d.instances);
+}
+
+TEST(L1D, size) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ }
+}
+
+TEST(L1D, associativity) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(4, l1d.instances[k].associativity);
+ }
+}
+
+TEST(L1D, sets) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(128, l1d.instances[k].sets);
+ }
+}
+
+TEST(L1D, partitions) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(1, l1d.instances[k].partitions);
+ }
+}
+
+TEST(L1D, line_size) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(64, l1d.instances[k].line_size);
+ }
+}
+
+TEST(L1D, flags) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(0, l1d.instances[k].flags);
+ }
+}
+
+TEST(L1D, processors) {
+ cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
+ for (uint32_t k = 0; k < l1d.count; k++) {
+ ASSERT_EQ(k, l1d.instances[k].thread_start);
+ ASSERT_EQ(1, l1d.instances[k].thread_count);
+ }
+}
+
+TEST(L2, count) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ ASSERT_EQ(1, l2.count);
+}
+
+TEST(L2, non_null) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ ASSERT_TRUE(l2.instances);
+}
+
+TEST(L2, size) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(16 * 1024 * 1024, l2.instances[k].size);
+ }
+}
+
+TEST(L2, associativity) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(8, l2.instances[k].associativity);
+ }
+}
+
+TEST(L2, sets) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(32768, l2.instances[k].sets);
+ }
+}
+
+TEST(L2, partitions) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(1, l2.instances[k].partitions);
+ }
+}
+
+TEST(L2, line_size) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(64, l2.instances[k].line_size);
+ }
+}
+
+TEST(L2, flags) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(0, l2.instances[k].flags);
+ }
+}
+
+TEST(L2, processors) {
+ cpuinfo_caches l2 = cpuinfo_get_l2_cache();
+ for (uint32_t k = 0; k < l2.count; k++) {
+ ASSERT_EQ(0, l2.instances[k].thread_start);
+ ASSERT_EQ(2, l2.instances[k].thread_count);
+ }
+}
+
+TEST(L3, none) {
+ cpuinfo_caches l3 = cpuinfo_get_l3_cache();
+ ASSERT_EQ(0, l3.count);
+ ASSERT_FALSE(l3.instances);
+}
+
+TEST(L4, none) {
+ cpuinfo_caches l4 = cpuinfo_get_l4_cache();
+ ASSERT_EQ(0, l4.count);
+ ASSERT_FALSE(l4.instances);
+}
+
+int main(int argc, char* argv[]) {
+ cpuinfo_set_proc_cpuinfo_path("test/cpuinfo/scaleway.log");
+ cpuinfo_initialize();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tools/cpu-info.c b/tools/cpu-info.c
index a464b1d..8de30b7 100644
--- a/tools/cpu-info.c
+++ b/tools/cpu-info.c
@@ -41,6 +41,9 @@ int main(int argc, char** argv) {
case cpuinfo_vendor_via:
printf("Vendor: VIA\n");
break;
+ case cpuinfo_vendor_cavium:
+ printf("Vendor: Cavium\n");
+ break;
default:
printf("Vendor: other (%d)\n", cpuinfo_processors[0].vendor);
}
@@ -243,6 +246,9 @@ int main(int argc, char** argv) {
case cpuinfo_uarch_hurricane:
printf("uArch: Hurricane\n");
break;
+ case cpuinfo_uarch_thunderx:
+ printf("uArch: ThunderX\n");
+ break;
default:
printf("uArch: other (0x%08X)\n", cpuinfo_processors[0].uarch);
}