path: root/test
author    Marat Dukhan <marat@fb.com>    2017-09-26 18:35:52 -0700
committer Marat Dukhan <marat@fb.com>    2017-09-26 18:35:52 -0700
commit    3040197bc3eb13796351e74a2e7a6f2bcc081752 (patch)
tree      2387e26e11be3307d931e4741d2f3ae559e0bb09 /test
parent    2b24889379602c17d2448c891e25c76d2b039ffc (diff)
download  cpuinfo-3040197bc3eb13796351e74a2e7a6f2bcc081752.tar.gz
Major API refactoring
Diffstat (limited to 'test')
-rw-r--r--  test/arm-cache.cc | 3
-rw-r--r--  test/blu-r1-hd.cc | 245
-rw-r--r--  test/galaxy-c9-pro.cc | 277
-rw-r--r--  test/galaxy-grand-prime-value-edition.cc | 243
-rw-r--r--  test/galaxy-j7-tmobile.cc | 253
-rw-r--r--  test/galaxy-j7-uae.cc | 253
-rw-r--r--  test/galaxy-s3-us.cc | 245
-rw-r--r--  test/galaxy-s4-us.cc | 245
-rw-r--r--  test/galaxy-s5-global.cc | 273
-rw-r--r--  test/galaxy-s5-us.cc | 245
-rw-r--r--  test/galaxy-s6.cc | 277
-rw-r--r--  test/galaxy-s7-global.cc | 279
-rw-r--r--  test/galaxy-s7-us.cc | 259
-rw-r--r--  test/galaxy-s8-global.cc | 279
-rw-r--r--  test/galaxy-s8-us.cc | 281
-rw-r--r--  test/galaxy-tab-3-7.0.cc | 243
-rw-r--r--  test/galaxy-win-duos.cc | 243
-rw-r--r--  test/get-current.cc | 34
-rw-r--r--  test/huawei-mate-8.cc | 277
-rw-r--r--  test/huawei-p9-lite.cc | 253
-rw-r--r--  test/init.cc | 350
-rw-r--r--  test/lenovo-a6600-plus.cc | 243
-rw-r--r--  test/lenovo-vibe-x2.cc | 273
-rw-r--r--  test/lg-k10-eu.cc | 243
-rw-r--r--  test/meizu-pro-6s.cc | 289
-rw-r--r--  test/memo-pad-7.cc | 247
-rw-r--r--  test/moto-e-gen1.cc | 243
-rw-r--r--  test/moto-g-gen2.cc | 243
-rw-r--r--  test/moto-g-gen3.cc | 243
-rw-r--r--  test/nexus-s.cc | 239
-rw-r--r--  test/nexus4.cc | 243
-rw-r--r--  test/nexus5x.cc | 277
-rw-r--r--  test/nexus6.cc | 243
-rw-r--r--  test/nexus6p.cc | 277
-rw-r--r--  test/nexus9.cc | 243
-rw-r--r--  test/oppo-r9.cc | 253
-rw-r--r--  test/pixel-c.cc | 243
-rw-r--r--  test/pixel-xl.cc | 259
-rw-r--r--  test/pixel.cc | 259
-rw-r--r--  test/process.py | 16
-rw-r--r--  test/scaleway.cc | 147
-rw-r--r--  test/xiaomi-redmi-2a.cc | 347
-rw-r--r--  test/xperia-c4-dual.cc | 253
-rw-r--r--  test/zenfone-2.cc | 247
44 files changed, 4740 insertions, 5887 deletions
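
The pattern applied throughout this diff replaces the exported globals (cpuinfo_processors, cpuinfo_processors_count, the cpuinfo_caches struct returned by the old cpuinfo_get_l1d_cache(), ...) with count functions and per-index accessors returning pointers (cpuinfo_get_processors_count(), cpuinfo_get_processor(i), cpuinfo_get_l1d_cache(i), ...). A minimal usage sketch of the new API, assuming only the accessors visible in the hunks below; the standalone main() is illustrative and not part of the test suite:

#include <cpuinfo.h>
#include <cstdio>

int main() {
	cpuinfo_initialize();

	/* Old API: iterate global arrays sized by global counters, e.g.
	 *   for (uint32_t i = 0; i < cpuinfo_processors_count; i++)
	 *     ... cpuinfo_processors[i].core ...
	 */

	/* New API: query counts and fetch individual entries by index. */
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
		std::printf("processor %u: smt id %u\n", i, processor->smt_id);
	}
	if (cpuinfo_get_l1d_caches_count() != 0) {
		std::printf("L1D cache size: %u bytes\n", cpuinfo_get_l1d_cache(0)->size);
	}
	return 0;
}
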
diff --git a/test/arm-cache.cc b/test/arm-cache.cc
index 4c517a2..5b730bf 100644
--- a/test/arm-cache.cc
+++ b/test/arm-cache.cc
@@ -926,5 +926,4 @@ TEST(NVIDIA, tegra_t210) {
EXPECT_EQ(48 * 1024, l1i.size);
EXPECT_EQ(32 * 1024, l1d.size);
EXPECT_EQ(2 * 1024 * 1024, l2.size);
-}
-
+}
\ No newline at end of file
diff --git a/test/blu-r1-hd.cc b/test/blu-r1-hd.cc
index 4b45c46..28cec5f 100644
--- a/test/blu-r1-hd.cc
+++ b/test/blu-r1-hd.cc
@@ -5,154 +5,154 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, non_null) {
- ASSERT_TRUE(cpuinfo_packages);
+ ASSERT_TRUE(cpuinfo_get_packages());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6735",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -277,198 +277,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <blu-r1-hd.h>
@@ -481,4 +452,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-c9-pro.cc b/test/galaxy-c9-pro.cc
index 92c2c65..53671ec 100644
--- a/test/galaxy-c9-pro.cc
+++ b/test/galaxy-c9-pro.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8976",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-c9-pro.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-grand-prime-value-edition.cc b/test/galaxy-grand-prime-value-edition.cc
index e063946..7c1c7c7 100644
--- a/test/galaxy-grand-prime-value-edition.cc
+++ b/test/galaxy-grand-prime-value-edition.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Spreadtrum SC7730SE",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-grand-prime-value-edition.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-j7-tmobile.cc b/test/galaxy-j7-tmobile.cc
index 4771d37..b8beced 100644
--- a/test/galaxy-j7-tmobile.cc
+++ b/test/galaxy-j7-tmobile.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 7580",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-j7-tmobile.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
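
For context on the pattern this commit migrates to: the old tests read exported globals such as cpuinfo_processors and cpuinfo_cores directly, while the new ones go through accessor functions and follow pointers between objects. A minimal consumer-side sketch of that traversal, assuming the <cpuinfo.h> header name, the struct cpuinfo_processor/cpuinfo_core type names, and cpuinfo_deinitialize(), none of which are spelled out in the hunks themselves:

#include <cstdio>
#include <cstdint>

#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	// Logical processors are now reached through accessors instead of the
	// cpuinfo_processors[] array; cores and packages hang off each processor.
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* proc = cpuinfo_get_processor(i);
		const struct cpuinfo_core* core = proc->core;
		std::printf("processor %u: linux cpu %u, core_id %u, smt_id %u\n",
			(unsigned) i, (unsigned) proc->linux_id,
			(unsigned) core->core_id, (unsigned) proc->smt_id);
	}
	cpuinfo_deinitialize();
	return 0;
}
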
diff --git a/test/galaxy-j7-uae.cc b/test/galaxy-j7-uae.cc
index 1b7bfb0..067e053 100644
--- a/test/galaxy-j7-uae.cc
+++ b/test/galaxy-j7-uae.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 7580",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-j7-uae.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
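
Every `sets` test above, for L1I, L1D and L2 alike, asserts the same cache-geometry identity: size equals sets times line_size times partitions times associativity. Factored out of gtest it is just this (a standalone sketch; the struct cpuinfo_cache type name is assumed, the field names are the ones used in the tests):

#include <cstdint>

#include <cpuinfo.h>

// A cache descriptor is internally consistent when its total size matches
// the product of its geometry parameters.
static bool cache_geometry_ok(const struct cpuinfo_cache* cache) {
	return cache->size ==
		cache->sets * cache->line_size * cache->partitions * cache->associativity;
}

// Example use: check every reported L1D instance after cpuinfo_initialize().
static bool all_l1d_geometry_ok(void) {
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		if (!cache_geometry_ok(cpuinfo_get_l1d_cache(i))) {
			return false;
		}
	}
	return true;
}
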
diff --git a/test/galaxy-s3-us.cc b/test/galaxy-s3-us.cc
index bd03b76..e4dd834 100644
--- a/test/galaxy-s3-us.cc
+++ b/test/galaxy-s3-us.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x511F04D4), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x511F04D4), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8960",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,200 +273,171 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s3-us.h>
@@ -479,4 +450,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
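
The L2 `processors` tests encode cache sharing through the processor_start/processor_count fields: on the Galaxy S3 above the single 1 MB L2 spans processors 0-1, while the big.LITTLE Exynos devices split 0-3 and 4-7 across two instances. Walking that mapping from the consumer side looks roughly like this (a sketch, assuming <cpuinfo.h> and cpuinfo_deinitialize() as before):

#include <cstdio>
#include <cstdint>

#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
		const struct cpuinfo_cache* l2 = cpuinfo_get_l2_cache(i);
		// Processors [processor_start, processor_start + processor_count)
		// share this L2 instance.
		std::printf("L2 #%u: %u KB, processors %u-%u\n",
			(unsigned) i, (unsigned) (l2->size / 1024),
			(unsigned) l2->processor_start,
			(unsigned) (l2->processor_start + l2->processor_count - 1));
	}
	cpuinfo_deinitialize();
	return 0;
}
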
diff --git a/test/galaxy-s4-us.cc b/test/galaxy-s4-us.cc
index ed9a383..38ab0e2 100644
--- a/test/galaxy-s4-us.cc
+++ b/test/galaxy-s4-us.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x511F06F0), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x511F06F0), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm APQ8064",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,200 +273,171 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+		switch (i) {
+			case 0:
+				ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s4-us.h>
@@ -479,4 +450,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
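
The PACKAGES name tests wrap the name field in strnlen with CPUINFO_PACKAGE_NAME_MAX because the buffer is fixed-size and not guaranteed to be NUL-terminated. Outside of gtest the same extraction reads (a minimal sketch; only identifiers already used in the tests, plus the assumed <cpuinfo.h> header):

#include <cstdint>
#include <cstring>
#include <string>

#include <cpuinfo.h>

// Copies a package (SoC) name into a std::string, stopping at the first NUL
// or at CPUINFO_PACKAGE_NAME_MAX characters, whichever comes first.
static std::string package_name(uint32_t index) {
	const char* name = cpuinfo_get_package(index)->name;
	return std::string(name, strnlen(name, CPUINFO_PACKAGE_NAME_MAX));
}
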
diff --git a/test/galaxy-s5-global.cc b/test/galaxy-s5-global.cc
index 6c62e77..75da2e9 100644
--- a/test/galaxy-s5-global.cc
+++ b/test/galaxy-s5-global.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a15, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a15, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x412FC0F3), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x412FC0F3), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 5422",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -325,246 +325,217 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(16, l2.instances[k].associativity);
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
break;
case 1:
- ASSERT_EQ(8, l2.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
break;
}
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s5-global.h>
@@ -577,4 +548,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
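The cache checks follow the same shape: instead of a cpuinfo_caches value with .count and .instances, the new API pairs a count query (cpuinfo_get_l2_caches_count()) with an indexed accessor (cpuinfo_get_l2_cache(i)), plus cpuinfo_get_l2_caches() for the whole array; absent levels report a zero count and a null array. A short illustrative dump under the same header assumption as the sketch above:

	// Hypothetical cache dump, illustrating the per-level accessors only.
	#include <cstdio>
	#include <cinttypes>

	#include <cpuinfo.h>  // assumed public header, as above

	int main() {
		cpuinfo_initialize();

		for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
			const auto* l2 = cpuinfo_get_l2_cache(i);
			std::printf("L2 #%" PRIu32 ": %" PRIu32 " KB, %" PRIu32 "-way, "
			            "processors %" PRIu32 "-%" PRIu32 "\n",
			            i, l2->size / 1024, l2->associativity,
			            l2->processor_start,
			            l2->processor_start + l2->processor_count - 1);
		}
		// Levels that are absent report a zero count and a null array.
		if (cpuinfo_get_l3_caches_count() == 0) {
			std::printf("no L3 cache\n");
		}
		return 0;
	}
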
diff --git a/test/galaxy-s5-us.cc b/test/galaxy-s5-us.cc
index a4f812a..992b93a 100644
--- a/test/galaxy-s5-us.cc
+++ b/test/galaxy-s5-us.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x512F06F1), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x512F06F1), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8974PRO-AC",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,200 +273,171 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
- }
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s5-us.h>
@@ -479,4 +450,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/galaxy-s6.cc b/test/galaxy-s6.cc
index 7fa1bf4..cb69b9d 100644
--- a/test/galaxy-s6.cc
+++ b/test/galaxy-s6.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x411FD070), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x411FD070), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 7420",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s6.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/galaxy-s7-global.cc b/test/galaxy-s7-global.cc
index ab2ac35..18f91c3 100644
--- a/test/galaxy-s7-global.cc
+++ b/test/galaxy-s7-global.cc
@@ -5,215 +5,215 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_get_core(i)->vendor);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
break;
}
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x531F0011), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x531F0011), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 8890",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -366,265 +366,236 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(128, l1i.instances[k].line_size);
+ ASSERT_EQ(128, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(8, l1d.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s7-global.h>
@@ -637,4 +608,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
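The hunks above and below all apply the same renaming: the exported globals cpuinfo_processors, cpuinfo_processors_count, cpuinfo_cores and cpuinfo_packages are replaced by accessor functions, with cpuinfo_get_processors()/cpuinfo_get_cores()/cpuinfo_get_packages() returning the arrays and cpuinfo_get_processor(i)/cpuinfo_get_core(i)/cpuinfo_get_package(i) returning a pointer to a single entry. A minimal caller-side sketch of the refactored API, assuming the library's usual <cpuinfo.h> public header (the tests themselves include generated per-device headers such as <galaxy-s7-global.h>):

#include <cstdint>
#include <cstdio>

#include <cpuinfo.h> /* assumed public header; the tests above use per-device mock headers instead */

int main() {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		/* accessor-based API: one pointer per logical processor instead of indexing a global array */
		const auto* processor = cpuinfo_get_processor(i);
		std::printf("processor %u: linux_id=%u smt_id=%u core_id=%u package=%s\n",
			i,
			processor->linux_id,
			processor->smt_id,
			processor->core->core_id,
			processor->package->name);
	}
	return 0;
}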
diff --git a/test/galaxy-s7-us.cc b/test/galaxy-s7-us.cc
index c4950c3..de81b00 100644
--- a/test/galaxy-s7-us.cc
+++ b/test/galaxy-s7-us.cc
@@ -5,177 +5,177 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x511F2052), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x511F2052), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x511F2112), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x511F2112), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8996",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -328,213 +328,184 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(3, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s7-us.h>
@@ -547,4 +518,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-s8-global.cc b/test/galaxy-s8-global.cc
index eee7a86..a2bfc87 100644
--- a/test/galaxy-s8-global.cc
+++ b/test/galaxy-s8-global.cc
@@ -5,215 +5,215 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_get_core(i)->vendor);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
break;
}
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x534F0010), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x534F0010), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 8895",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -366,265 +366,236 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(128, l1i.instances[k].line_size);
+ ASSERT_EQ(128, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(8, l1d.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s8-global.h>
@@ -637,4 +608,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-s8-us.cc b/test/galaxy-s8-us.cc
index c6a19dc..d636eca 100644
--- a/test/galaxy-s8-us.cc
+++ b/test/galaxy-s8-us.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8998",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,272 +353,243 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
}
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(16, l1d.instances[k].associativity);
+ ASSERT_EQ(16, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s8-us.h>
@@ -631,4 +602,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-tab-3-7.0.cc b/test/galaxy-tab-3-7.0.cc
index ee5e9b1..ff606cf 100644
--- a/test/galaxy-tab-3-7.0.cc
+++ b/test/galaxy-tab-3-7.0.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x413FC090), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x413FC090), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Marvell PXA986",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(32, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-tab-3-7.0.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-win-duos.cc b/test/galaxy-win-duos.cc
index c5d9704..b36973f 100644
--- a/test/galaxy-win-duos.cc
+++ b/test/galaxy-win-duos.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a5, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a5, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC051), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC051), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8625Q",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(32, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-win-duos.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/get-current.cc b/test/get-current.cc
new file mode 100644
index 0000000..c9bc1b9
--- /dev/null
+++ b/test/get-current.cc
@@ -0,0 +1,34 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+
+
+TEST(CURRENT_PROCESSOR, not_null) {
+ ASSERT_TRUE(cpuinfo_get_current_processor());
+}
+
+TEST(CURRENT_PROCESSOR, within_bounds) {
+ const struct cpuinfo_processor* current_processor = cpuinfo_get_current_processor();
+ const struct cpuinfo_processor* processors_begin = cpuinfo_get_processors();
+ const struct cpuinfo_processor* processors_end = processors_begin + cpuinfo_get_processors_count();
+ ASSERT_GE(current_processor, processors_begin);
+ ASSERT_LT(current_processor, processors_end);
+}
+
+TEST(CURRENT_CORE, not_null) {
+ ASSERT_TRUE(cpuinfo_get_current_core());
+}
+
+TEST(CURRENT_CORE, within_bounds) {
+ const struct cpuinfo_core* current_core = cpuinfo_get_current_core();
+ const struct cpuinfo_core* cores_begin = cpuinfo_get_cores();
+ const struct cpuinfo_core* cores_end = cores_begin + cpuinfo_get_cores_count();
+ ASSERT_GE(current_core, cores_begin);
+ ASSERT_LT(current_core, cores_end);
+}
+
+int main(int argc, char* argv[]) {
+ cpuinfo_initialize();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/test/huawei-mate-8.cc b/test/huawei-mate-8.cc
index 3b6544f..63e0f1e 100644
--- a/test/huawei-mate-8.cc
+++ b/test/huawei-mate-8.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("HiSilicon Kirin 950",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <huawei-mate-8.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/huawei-p9-lite.cc b/test/huawei-p9-lite.cc
index ff2f1a9..c47237c 100644
--- a/test/huawei-p9-lite.cc
+++ b/test/huawei-p9-lite.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("HiSilicon Kirin 650",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <huawei-p9-lite.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
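
The refactored tests above replace the old cpuinfo_caches struct (with its count/instances fields) by per-level count getters and per-index accessors. A minimal standalone sketch of the new iteration idiom, assuming only the cpuinfo.h accessors exercised by these tests (cpuinfo_initialize, cpuinfo_get_l1d_caches_count, cpuinfo_get_l1d_cache):

#include <cpuinfo.h>
#include <cstdio>

int main() {
	// Initialize cpuinfo before querying any topology or cache information,
	// exactly as the test main() functions in this commit do.
	cpuinfo_initialize();
	// Iterate L1 data caches with the per-index accessor used by the tests above.
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		const struct cpuinfo_cache* l1d = cpuinfo_get_l1d_cache(i);
		std::printf("L1D %u: %u bytes, %u-way, %u-byte lines\n",
			i, l1d->size, l1d->associativity, l1d->line_size);
	}
	return 0;
}
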
diff --git a/test/init.cc b/test/init.cc
index 67ed926..b8701da 100644
--- a/test/init.cc
+++ b/test/init.cc
@@ -4,480 +4,416 @@
TEST(PROCESSORS_COUNT, non_zero) {
- ASSERT_NE(0, cpuinfo_processors_count);
+ ASSERT_NE(0, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(CORES_COUNT, non_zero) {
- ASSERT_NE(0, cpuinfo_cores_count);
+ ASSERT_NE(0, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, known_vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_NE(cpuinfo_vendor_unknown, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_NE(cpuinfo_vendor_unknown, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, known_uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_NE(cpuinfo_uarch_unknown, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_NE(cpuinfo_uarch_unknown, cpuinfo_get_core(i)->uarch);
}
}
TEST(L1I, non_zero_count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_NE(0, l1i.count);
+ ASSERT_NE(0, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, valid_count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_LE(l1i.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l1i_caches_count(), cpuinfo_get_processors_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, non_zero_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, valid_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].associativity * l1i.instances[k].sets * l1i.instances[k].partitions * l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->associativity * cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, non_zero_associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, non_zero_partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, non_zero_line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, power_of_2_line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].line_size & (l1i.instances[k].line_size - 1));
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->line_size & (cpuinfo_get_l1i_cache(i)->line_size - 1));
}
}
TEST(L1I, valid_line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_GE(l1i.instances[k].line_size, 16);
- ASSERT_LE(l1i.instances[k].line_size, 128);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_GE(cpuinfo_get_l1i_cache(i)->line_size, 16);
+ ASSERT_LE(cpuinfo_get_l1i_cache(i)->line_size, 128);
}
}
TEST(L1I, valid_flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags & ~valid_flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags & ~valid_flags);
}
}
TEST(L1I, non_inclusive) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, l1i.instances[k].flags & CPUINFO_CACHE_INCLUSIVE);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l1i_cache(i)->flags & CPUINFO_CACHE_INCLUSIVE);
}
}
TEST(L1I, non_zero_processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1I, valid_processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_LT(l1i.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l1i.instances[k].processor_start + l1i.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_LT(cpuinfo_get_l1i_cache(i)->processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l1i_cache(i)->processor_start + cpuinfo_get_l1i_cache(i)->processor_count, cpuinfo_get_processors_count());
}
}
TEST(L1D, non_zero_count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_NE(0, l1d.count);
+ ASSERT_NE(0, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, valid_count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_LE(l1d.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l1d_caches_count(), cpuinfo_get_processors_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, non_zero_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, valid_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].associativity * l1d.instances[k].sets * l1d.instances[k].partitions * l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->associativity * cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, non_zero_associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, non_zero_partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, non_zero_line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, power_of_2_line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].line_size & (l1d.instances[k].line_size - 1));
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->line_size & (cpuinfo_get_l1d_cache(i)->line_size - 1));
}
}
TEST(L1D, valid_line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_GE(l1d.instances[k].line_size, 16);
- ASSERT_LE(l1d.instances[k].line_size, 128);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_GE(cpuinfo_get_l1d_cache(i)->line_size, 16);
+ ASSERT_LE(cpuinfo_get_l1d_cache(i)->line_size, 128);
}
}
TEST(L1D, valid_flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags & ~valid_flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags & ~valid_flags);
}
}
TEST(L1D, non_inclusive) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, l1d.instances[k].flags & CPUINFO_CACHE_INCLUSIVE);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l1d_cache(i)->flags & CPUINFO_CACHE_INCLUSIVE);
}
}
TEST(L1D, non_zero_processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L1D, valid_processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_LT(l1d.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l1d.instances[k].processor_start + l1d.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_LT(cpuinfo_get_l1d_cache(i)->processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l1d_cache(i)->processor_start + cpuinfo_get_l1d_cache(i)->processor_count, cpuinfo_get_processors_count());
}
}
TEST(L2, valid_count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_LE(l2.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l2_caches_count(), cpuinfo_get_processors_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- if (l2.count != 0) {
- ASSERT_TRUE(l2.instances);
+ if (cpuinfo_get_l2_caches_count() != 0) {
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
}
TEST(L2, non_zero_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, valid_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].associativity * l2.instances[k].sets * l2.instances[k].partitions * l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->associativity * cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, non_zero_associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, non_zero_partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, non_zero_line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, power_of_2_line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].line_size & (l2.instances[k].line_size - 1));
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->line_size & (cpuinfo_get_l2_cache(i)->line_size - 1));
}
}
TEST(L2, valid_line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_GE(l2.instances[k].line_size, 16);
- ASSERT_LE(l2.instances[k].line_size, 128);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_GE(cpuinfo_get_l2_cache(i)->line_size, 16);
+ ASSERT_LE(cpuinfo_get_l2_cache(i)->line_size, 128);
}
}
TEST(L2, valid_flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags & ~valid_flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags & ~valid_flags);
}
}
TEST(L2, non_zero_processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L2, valid_processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_LT(l2.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l2.instances[k].processor_start + l2.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_LT(cpuinfo_get_l2_cache(i)->processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l2_cache(i)->processor_start + cpuinfo_get_l2_cache(i)->processor_count, cpuinfo_get_processors_count());
}
}
TEST(L3, valid_count) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_LE(l3.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l3_caches_count(), cpuinfo_get_processors_count());
}
TEST(L3, non_null) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- if (l3.count != 0) {
- ASSERT_TRUE(l3.instances);
+ if (cpuinfo_get_l3_caches_count() != 0) {
+ ASSERT_TRUE(cpuinfo_get_l3_caches());
}
}
TEST(L3, non_zero_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].size);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].size);
}
}
TEST(L3, valid_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_EQ(l3.instances[k].size,
- l3.instances[k].associativity * l3.instances[k].sets * l3.instances[k].partitions * l3.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_EQ(cpuinfo_get_l3_caches()[k].size,
+ cpuinfo_get_l3_caches()[k].associativity * cpuinfo_get_l3_caches()[k].sets * cpuinfo_get_l3_caches()[k].partitions * cpuinfo_get_l3_caches()[k].line_size);
}
}
TEST(L3, non_zero_associativity) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].associativity);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].associativity);
}
}
TEST(L3, non_zero_partitions) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].partitions);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].partitions);
}
}
TEST(L3, non_zero_line_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].line_size);
}
}
TEST(L3, power_of_2_line_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_EQ(0, l3.instances[k].line_size & (l3.instances[k].line_size - 1));
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l3_caches()[k].line_size & (cpuinfo_get_l3_caches()[k].line_size - 1));
}
}
TEST(L3, valid_line_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_GE(l3.instances[k].line_size, 16);
- ASSERT_LE(l3.instances[k].line_size, 128);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_GE(cpuinfo_get_l3_caches()[k].line_size, 16);
+ ASSERT_LE(cpuinfo_get_l3_caches()[k].line_size, 128);
}
}
TEST(L3, valid_flags) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_EQ(0, l3.instances[k].flags & ~valid_flags);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l3_caches()[k].flags & ~valid_flags);
}
}
TEST(L3, non_zero_processors) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].processor_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].processor_count);
}
}
TEST(L3, valid_processors) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_LT(l3.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l3.instances[k].processor_start + l3.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_LT(cpuinfo_get_l3_caches()[k].processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l3_caches()[k].processor_start + cpuinfo_get_l3_caches()[k].processor_count, cpuinfo_get_processors_count());
}
}
TEST(L4, valid_count) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_LE(l4.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l4_caches_count(), cpuinfo_get_processors_count());
}
TEST(L4, non_null) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- if (l4.count != 0) {
- ASSERT_TRUE(l4.instances);
+ if (cpuinfo_get_l4_caches_count() != 0) {
+ ASSERT_TRUE(cpuinfo_get_l4_caches());
}
}
TEST(L4, non_zero_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].size);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].size);
}
}
TEST(L4, valid_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_EQ(l4.instances[k].size,
- l4.instances[k].associativity * l4.instances[k].sets * l4.instances[k].partitions * l4.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_EQ(cpuinfo_get_l4_caches()[k].size,
+ cpuinfo_get_l4_caches()[k].associativity * cpuinfo_get_l4_caches()[k].sets * cpuinfo_get_l4_caches()[k].partitions * cpuinfo_get_l4_caches()[k].line_size);
}
}
TEST(L4, non_zero_associativity) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].associativity);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].associativity);
}
}
TEST(L4, non_zero_partitions) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].partitions);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].partitions);
}
}
TEST(L4, non_zero_line_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].line_size);
}
}
TEST(L4, power_of_2_line_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_EQ(0, l4.instances[k].line_size & (l4.instances[k].line_size - 1));
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l4_caches()[k].line_size & (cpuinfo_get_l4_caches()[k].line_size - 1));
}
}
TEST(L4, valid_line_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_GE(l4.instances[k].line_size, 16);
- ASSERT_LE(l4.instances[k].line_size, 128);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_GE(cpuinfo_get_l4_caches()[k].line_size, 16);
+ ASSERT_LE(cpuinfo_get_l4_caches()[k].line_size, 128);
}
}
TEST(L4, valid_flags) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_EQ(0, l4.instances[k].flags & ~valid_flags);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l4_caches()[k].flags & ~valid_flags);
}
}
TEST(L4, non_zero_processors) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].processor_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].processor_count);
}
}
TEST(L4, valid_processors) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_LT(l4.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l4.instances[k].processor_start + l4.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_LT(cpuinfo_get_l4_caches()[k].processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l4_caches()[k].processor_start + cpuinfo_get_l4_caches()[k].processor_count, cpuinfo_get_processors_count());
}
}
@@ -485,4 +421,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
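
The init.cc checks above all assert the same cache-geometry invariant: total size factors into sets, line size, partitions, and associativity, with a power-of-two line size between 16 and 128 bytes. A small helper capturing that invariant, under the assumption that the cpuinfo_cache fields are exactly those referenced by these tests:

#include <cpuinfo.h>

// Returns true when a cache descriptor satisfies the invariants asserted in init.cc:
// the line size is a power of two in [16, 128], and the total size equals
// associativity * sets * partitions * line_size.
static bool cache_geometry_is_valid(const struct cpuinfo_cache* cache) {
	if (cache->line_size == 0 || (cache->line_size & (cache->line_size - 1)) != 0) {
		return false;
	}
	if (cache->line_size < 16 || cache->line_size > 128) {
		return false;
	}
	return cache->size ==
		cache->associativity * cache->sets * cache->partitions * cache->line_size;
}
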
diff --git a/test/lenovo-a6600-plus.cc b/test/lenovo-a6600-plus.cc
index 3482024..e71802d 100644
--- a/test/lenovo-a6600-plus.cc
+++ b/test/lenovo-a6600-plus.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6735P",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <lenovo-a6600-plus.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
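
The per-processor tests above also tie each logical processor to the cache instances that serve it (cache.l1i, cache.l1d, cache.l2), with absent levels left null. A minimal sketch of walking that linkage, assuming the same accessors these tests rely on:

#include <cpuinfo.h>
#include <cstdio>

int main() {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* proc = cpuinfo_get_processor(i);
		// Cache levels that do not exist on the device (e.g. L3 in the tests above)
		// are null pointers, so guard before dereferencing.
		const struct cpuinfo_cache* l1d = proc->cache.l1d;
		const struct cpuinfo_cache* l2 = proc->cache.l2;
		std::printf("processor %u: L1D %u KiB, L2 %u KiB\n", i,
			l1d ? l1d->size / 1024 : 0, l2 ? l2->size / 1024 : 0);
	}
	return 0;
}
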
diff --git a/test/lenovo-vibe-x2.cc b/test/lenovo-vibe-x2.cc
index 460e04e..6faae6c 100644
--- a/test/lenovo-vibe-x2.cc
+++ b/test/lenovo-vibe-x2.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a17, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a17, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x410FC0E0), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FC0E0), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6595",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -325,246 +325,217 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(16, l2.instances[k].associativity);
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
break;
case 1:
- ASSERT_EQ(8, l2.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
break;
}
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <lenovo-vibe-x2.h>
@@ -577,4 +548,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/lg-k10-eu.cc b/test/lg-k10-eu.cc
index b21377d..e37e3b3 100644
--- a/test/lg-k10-eu.cc
+++ b/test/lg-k10-eu.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8916",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <lg-k10-eu.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/meizu-pro-6s.cc b/test/meizu-pro-6s.cc
index 68a0708..8ac0fa7 100644
--- a/test/meizu-pro-6s.cc
+++ b/test/meizu-pro-6s.cc
@@ -5,145 +5,145 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(10, cpuinfo_processors_count);
+ ASSERT_EQ(10, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 8, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 8, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 6:
case 7:
case 8:
case 9:
- ASSERT_EQ(i - 6, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 6, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
case 6:
case 7:
case 8:
case 9:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[2], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(2), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(10, cpuinfo_cores_count);
+ ASSERT_EQ(10, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_core(i)->uarch);
break;
case 2:
case 3:
@@ -153,18 +153,18 @@ TEST(CORES, uarch) {
case 7:
case 8:
case 9:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x410FD081), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD081), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
@@ -174,45 +174,45 @@ TEST(CORES, midr) {
case 7:
case 8:
case 9:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6797T",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(10, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(10, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(10, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(10, cpuinfo_get_package(i)->core_count);
}
}
@@ -365,22 +365,19 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(10, l1i.count);
+ ASSERT_EQ(10, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 2:
case 3:
@@ -390,19 +387,18 @@ TEST(L1I, size) {
case 7:
case 8:
case 9:
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 2:
case 3:
@@ -412,66 +408,58 @@ TEST(L1I, associativity) {
case 7:
case 8:
case 9:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(10, l1d.count);
+ ASSERT_EQ(10, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
case 2:
case 3:
@@ -481,19 +469,18 @@ TEST(L1D, size) {
case 7:
case 8:
case 9:
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
}
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 2:
case 3:
@@ -503,148 +490,132 @@ TEST(L1D, associativity) {
case 7:
case 8:
case 9:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(3, l2.count);
+ ASSERT_EQ(3, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
case 2:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
case 2:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 2:
- ASSERT_EQ(6, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(6, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <meizu-pro-6s.h>
@@ -657,4 +628,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/memo-pad-7.cc b/test/memo-pad-7.cc
index c5ac4a9..f73addb 100644
--- a/test/memo-pad-7.cc
+++ b/test/memo-pad-7.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[i / 2], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i / 2), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, cpuid) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x00030678), cpuinfo_cores[i].cpuid);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x00030678), cpuinfo_get_core(i)->cpuid);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Atom Z3745",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -364,8 +364,8 @@ TEST(ISA, movbe) {
ASSERT_TRUE(cpuinfo_has_x86_movbe());
}
-TEST(ISA, lahf_salf) {
- ASSERT_TRUE(cpuinfo_has_x86_lahf_salf());
+TEST(ISA, lahf_sahf) {
+ ASSERT_TRUE(cpuinfo_has_x86_lahf_sahf());
}
TEST(ISA, lzcnt) {
@@ -413,198 +413,169 @@ TEST(ISA, sha) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(8, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(6, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(CPUINFO_CACHE_UNIFIED, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(CPUINFO_CACHE_UNIFIED, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(k * 2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(i * 2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <memo-pad-7.h>
@@ -615,4 +586,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/moto-e-gen1.cc b/test/moto-e-gen1.cc
index c1c94e5..9904aad 100644
--- a/test/moto-e-gen1.cc
+++ b/test/moto-e-gen1.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8610",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <moto-e-gen1.h>
@@ -474,4 +445,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
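
The sets tests above all assert the same geometric identity, size = sets * line_size * partitions * associativity, now phrased through the per-index cache getters. A small sketch, under the same assumptions as before (header name, output formatting), that checks this invariant for every L1 data cache:

    #include <cstdio>
    #include <cstdint>
    #include <cpuinfo.h>  // header name assumed

    int main() {
    	cpuinfo_initialize();
    	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
    		const auto* l1d = cpuinfo_get_l1d_cache(i);
    		// the invariant the L1D sets tests check: size == sets * line_size * partitions * associativity
    		const uint32_t expected =
    			l1d->sets * l1d->line_size * l1d->partitions * l1d->associativity;
    		std::printf("L1D %u: %u bytes (%s)\n",
    			(unsigned) i, (unsigned) l1d->size,
    			l1d->size == expected ? "consistent" : "inconsistent");
    	}
    	return 0;
    }
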
diff --git a/test/moto-g-gen2.cc b/test/moto-g-gen2.cc
index d5b1c86..da6b0ac 100644
--- a/test/moto-g-gen2.cc
+++ b/test/moto-g-gen2.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8226",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <moto-g-gen2.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
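
On the single-cluster devices covered here, every processor's cache.l2 pointer resolves to the one shared L2 instance, i.e. cpuinfo_get_processor(i)->cache.l2 == cpuinfo_get_l2_cache(0). A sketch of walking that mapping in the other direction, from an L2 instance to the processors it serves, using only accessors that appear in the diff (header name assumed):

    #include <cstdio>
    #include <cstdint>
    #include <cpuinfo.h>  // header name assumed

    int main() {
    	cpuinfo_initialize();
    	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
    		const auto* l2 = cpuinfo_get_l2_cache(i);
    		// processor_start/processor_count describe the contiguous range sharing this L2
    		for (uint32_t p = l2->processor_start; p < l2->processor_start + l2->processor_count; p++) {
    			std::printf("L2 %u serves processor %u (linux_id %u)\n",
    				(unsigned) i, (unsigned) p,
    				(unsigned) cpuinfo_get_processor(p)->linux_id);
    		}
    	}
    	return 0;
    }
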
diff --git a/test/moto-g-gen3.cc b/test/moto-g-gen3.cc
index e1265d2..a59312c 100644
--- a/test/moto-g-gen3.cc
+++ b/test/moto-g-gen3.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8916",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <moto-g-gen3.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
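
The CORES tests identify each core by vendor, microarchitecture and raw MIDR value through cpuinfo_get_core(i). A sketch that reports those fields, for example distinguishing the Cortex-A53 parts above from the Cortex-A7 ones; no enum-to-string helpers are shown in the diff, so raw enum values and the MIDR are printed instead (header name assumed):

    #include <cstdio>
    #include <cstdint>
    #include <cpuinfo.h>  // header name assumed

    int main() {
    	cpuinfo_initialize();
    	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
    		const auto* core = cpuinfo_get_core(i);
    		const bool is_a53 = core->uarch == cpuinfo_uarch_cortex_a53;
    		std::printf("core %u: vendor=%d uarch=%d%s midr=0x%08X\n",
    			(unsigned) i, (int) core->vendor, (int) core->uarch,
    			is_a53 ? " (Cortex-A53)" : "",
    			(unsigned) core->midr);
    	}
    	return 0;
    }
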
diff --git a/test/nexus-s.cc b/test/nexus-s.cc
index edfa77c..db68226 100644
--- a/test/nexus-s.cc
+++ b/test/nexus-s.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(1, cpuinfo_processors_count);
+ ASSERT_EQ(1, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(1, cpuinfo_cores_count);
+ ASSERT_EQ(1, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a8, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a8, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x412FC082), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x412FC082), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 3110",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(1, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(1, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,196 +273,167 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(1, l1i.count);
+ ASSERT_EQ(1, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(128, l1i.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l1i_cache(i)->sets);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(1, l1d.count);
+ ASSERT_EQ(1, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(128, l1d.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l1d_cache(i)->sets);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(1, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus-s.h>
@@ -475,4 +446,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
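Aside: the cache hunks above all follow one migration pattern — the old aggregate return (cpuinfo_caches l1d = cpuinfo_get_l1d_cache(); ... l1d.instances[k]) is replaced by a count function plus a per-index pointer accessor. A minimal illustrative sketch of a test loop written directly against the refactored API (not taken verbatim from any file in this diff; it only uses the functions and fields the hunks above already exercise):

	#include <cpuinfo.h>
	#include <gtest/gtest.h>

	TEST(L1D, geometry) {
		for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
			const auto* l1d = cpuinfo_get_l1d_cache(i);  /* one cache instance per index */
			/* size must equal sets x line_size x partitions x associativity */
			ASSERT_EQ(l1d->size,
				l1d->sets * l1d->line_size * l1d->partitions * l1d->associativity);
		}
	}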
diff --git a/test/nexus4.cc b/test/nexus4.cc
index 755a29d..22002f9 100644
--- a/test/nexus4.cc
+++ b/test/nexus4.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x510F06F2), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x510F06F2), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm APQ8064",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus4.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
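Aside: the topology assertions migrate the same way, from the exported arrays (cpuinfo_processors[i], cpuinfo_cores[i], cpuinfo_packages[0]) to per-index accessors returning pointers. An illustrative sketch of the linkage checks under the new API — on these single-threaded ARM devices each core hosts exactly one logical processor, so processor and core indices coincide, which is the assumption this condensed loop relies on:

	TEST(TOPOLOGY, linkage) {
		for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
			const auto* proc = cpuinfo_get_processor(i);
			/* each logical processor points back at its core and its (single) package */
			ASSERT_EQ(cpuinfo_get_core(i), proc->core);
			ASSERT_EQ(cpuinfo_get_package(0), proc->package);
		}
	}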
diff --git a/test/nexus5x.cc b/test/nexus5x.cc
index 7082949..4c333e9 100644
--- a/test/nexus5x.cc
+++ b/test/nexus5x.cc
@@ -5,194 +5,194 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(6, cpuinfo_processors_count);
+ ASSERT_EQ(6, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(6, cpuinfo_cores_count);
+ ASSERT_EQ(6, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x411FD072), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x411FD072), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8992",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(6, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(6, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_package(i)->core_count);
}
}
@@ -345,253 +345,224 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(6, l1i.count);
+ ASSERT_EQ(6, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(6, l1d.count);
+ ASSERT_EQ(6, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus5x.h>
@@ -604,4 +575,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
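Aside: on heterogeneous parts such as the MSM8992 above, the expected values differ between the big and LITTLE clusters, so the tests switch on the index. A condensed illustrative sketch of the same check (the uarch values and the 2 big / 4 LITTLE split are exactly what the hunks above assert; the ternary form is just a compaction of the switch):

	TEST(CORES, uarch_by_cluster) {
		for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
			/* cores 0-1: Cortex-A57 (big); cores 2-5: Cortex-A53 (LITTLE) */
			const auto expected =
				(i < 2) ? cpuinfo_uarch_cortex_a57 : cpuinfo_uarch_cortex_a53;
			ASSERT_EQ(expected, cpuinfo_get_core(i)->uarch);
		}
	}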
diff --git a/test/nexus6.cc b/test/nexus6.cc
index 7afa612..edbf91b 100644
--- a/test/nexus6.cc
+++ b/test/nexus6.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x513F06F1), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x513F06F1), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm APQ8084",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus6.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
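Aside: each device test keeps the same main(), visible in the trailing hunks of every file here: cpuinfo_initialize() runs before the googletest runner so the accessors above have data to return, and the <nexus*.h> header each file includes presumably carries the mocked platform description for that device. The harness, as shown in the diff context:

	int main(int argc, char* argv[]) {
		cpuinfo_initialize();                    /* parse the (mocked) platform description */
		::testing::InitGoogleTest(&argc, argv);  /* then hand control to googletest */
		return RUN_ALL_TESTS();
	}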
diff --git a/test/nexus6p.cc b/test/nexus6p.cc
index 1a0fc0b..e356e14 100644
--- a/test/nexus6p.cc
+++ b/test/nexus6p.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8994",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus6p.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
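The hunks above show the core of the refactoring: tests no longer read the exported arrays (cpuinfo_processors, cpuinfo_cores, cpuinfo_packages) and their count variables, but go through per-index accessor functions. A minimal standalone sketch of the new call pattern, using only the cpuinfo_get_* functions and struct fields that appear in the assertions above (the <cpuinfo.h> header name is an assumption, not part of this diff):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	/* Packages: name is a fixed-size, not necessarily null-terminated buffer,
	   so bound the print with strnlen as the PACKAGES.name tests do. */
	for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
		const auto* package = cpuinfo_get_package(i);
		std::printf("package %u: %.*s (%u cores, %u processors)\n", i,
			(int) strnlen(package->name, CPUINFO_PACKAGE_NAME_MAX), package->name,
			package->core_count, package->processor_count);
	}
	/* Logical processors point back at their core, as the PROCESSORS.core tests assert. */
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const auto* processor = cpuinfo_get_processor(i);
		std::printf("processor %u: smt_id=%u core_id=%u\n",
			i, processor->smt_id, processor->core->core_id);
	}
	return 0;
}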
diff --git a/test/nexus9.cc b/test/nexus9.cc
index 3e0a71c..abdfec7 100644
--- a/test/nexus9.cc
+++ b/test/nexus9.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_nvidia, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_nvidia, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_denver, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_denver, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x4E0F0000), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x4E0F0000), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("nVidia Tegra T132",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -301,198 +301,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(128 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(128 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus9.h>
@@ -505,4 +476,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/oppo-r9.cc b/test/oppo-r9.cc
index 5228582..2d954b8 100644
--- a/test/oppo-r9.cc
+++ b/test/oppo-r9.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, DISABLED_linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6755",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <oppo-r9.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
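The cache hunks repeat the same conversion for the per-level cache queries: the old cpuinfo_get_l1d_cache() returned a cpuinfo_caches aggregate with count and instances fields, while the refactored API pairs cpuinfo_get_l1d_caches_count() with an indexed cpuinfo_get_l1d_cache(i) accessor (likewise for L1I/L2/L3/L4). A short sketch of enumerating the L1 data caches through the new calls, again assuming the <cpuinfo.h> header; every accessor and field below is taken from the assertions in this diff:

#include <cstdint>
#include <cstdio>
#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		const auto* l1d = cpuinfo_get_l1d_cache(i);
		/* The L1D sets tests assert size == sets * line_size * partitions * associativity. */
		std::printf("L1D %u: %u bytes, %u-way, %u-byte lines, processors [%u, %u)\n",
			i, l1d->size, l1d->associativity, l1d->line_size,
			l1d->processor_start, l1d->processor_start + l1d->processor_count);
	}
	return 0;
}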
diff --git a/test/pixel-c.cc b/test/pixel-c.cc
index 5d92cf6..28cf7f9 100644
--- a/test/pixel-c.cc
+++ b/test/pixel-c.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("nVidia Tegra T210",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -301,198 +301,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <pixel-c.h>
@@ -505,4 +476,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/pixel-xl.cc b/test/pixel-xl.cc
index 62b5e73..0c6d374 100644
--- a/test/pixel-xl.cc
+++ b/test/pixel-xl.cc
@@ -5,177 +5,177 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8996PRO-AB",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -328,213 +328,184 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(3, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <pixel-xl.h>
@@ -547,4 +518,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/pixel.cc b/test/pixel.cc
index 5b8dce3..2175c85 100644
--- a/test/pixel.cc
+++ b/test/pixel.cc
@@ -5,177 +5,177 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8996PRO-AB",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -328,213 +328,184 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(3, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <pixel.h>
@@ -547,4 +518,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/process.py b/test/process.py
new file mode 100644
index 0000000..18b6161
--- /dev/null
+++ b/test/process.py
@@ -0,0 +1,16 @@
+import re
+import sys
+
+for fn in sys.argv[1:]:
+ code = open(fn).read().splitlines()
+ new_code = []
+ for line in code:
+ if line.strip().startswith("cpuinfo_caches l"):
+ continue
+ if "switch (k)" in line:
+ line = line.replace("switch (k)", "switch (i)")
+ elif "ASSERT_EQ(k," in line:
+ line = line.replace("ASSERT_EQ(k,", "ASSERT_EQ(i,")
+ new_code.append(line)
+
+ open(fn, "w").write("\n".join(new_code))
diff --git a/test/scaleway.cc b/test/scaleway.cc
index c53facb..c794fcb 100644
--- a/test/scaleway.cc
+++ b/test/scaleway.cc
@@ -9,18 +9,18 @@ TEST(PROCESSORS, count) {
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, vendor) {
for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_cavium, cpuinfo_processors[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_cavium, cpuinfo_get_processors()[i].vendor);
}
}
TEST(PROCESSORS, uarch) {
for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_thunderx, cpuinfo_processors[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_thunderx, cpuinfo_get_processors()[i].uarch);
}
}
@@ -139,195 +139,166 @@ TEST(ISA, fcma) {
#endif /* CPUINFO_ARCH_ARM64 */
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(78 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(78 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(312, l1i.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(312, cpuinfo_get_l1i_cache(i)->sets);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(128, l1d.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l1d_cache(i)->sets);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(32768, l2.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(32768, cpuinfo_get_l2_cache(i)->sets);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <scaleway.h>
@@ -337,4 +308,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
diff --git a/test/xiaomi-redmi-2a.cc b/test/xiaomi-redmi-2a.cc
index 0176a95..eaf1a4e 100644
--- a/test/xiaomi-redmi-2a.cc
+++ b/test/xiaomi-redmi-2a.cc
@@ -5,463 +5,462 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(5, cpuinfo_processors_count);
+ ASSERT_EQ(5, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(5, cpuinfo_cores_count);
+ ASSERT_EQ(5, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Leadcore LC1860",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(5, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(5, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(5, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(5, cpuinfo_get_package(i)->core_count);
}
}
TEST(ISA, thumb) {
- ASSERT_TRUE(cpuinfo_isa.thumb);
+ ASSERT_TRUE(cpuinfo_has_arm_thumb());
}
TEST(ISA, thumb2) {
- ASSERT_TRUE(cpuinfo_isa.thumb2);
-}
-
-TEST(ISA, thumbee) {
- ASSERT_FALSE(cpuinfo_isa.thumbee);
-}
-
-TEST(ISA, jazelle) {
- ASSERT_FALSE(cpuinfo_isa.jazelle);
+ ASSERT_TRUE(cpuinfo_has_arm_thumb2());
}
TEST(ISA, armv5e) {
- ASSERT_TRUE(cpuinfo_isa.armv5e);
+ ASSERT_TRUE(cpuinfo_has_arm_v5e());
}
TEST(ISA, armv6) {
- ASSERT_TRUE(cpuinfo_isa.armv6);
+ ASSERT_TRUE(cpuinfo_has_arm_v6());
}
TEST(ISA, armv6k) {
- ASSERT_TRUE(cpuinfo_isa.armv6k);
+ ASSERT_TRUE(cpuinfo_has_arm_v6k());
}
TEST(ISA, armv7) {
- ASSERT_TRUE(cpuinfo_isa.armv7);
+ ASSERT_TRUE(cpuinfo_has_arm_v7());
}
TEST(ISA, armv7mp) {
- ASSERT_TRUE(cpuinfo_isa.armv7mp);
+ ASSERT_TRUE(cpuinfo_has_arm_v7mp());
}
TEST(ISA, idiv) {
- ASSERT_TRUE(cpuinfo_isa.idiv);
+ ASSERT_TRUE(cpuinfo_has_arm_idiv());
}
TEST(ISA, vfpv2) {
- ASSERT_FALSE(cpuinfo_isa.vfpv2);
+ ASSERT_FALSE(cpuinfo_has_arm_vfpv2());
}
TEST(ISA, vfpv3) {
- ASSERT_TRUE(cpuinfo_isa.vfpv3);
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3());
}
-TEST(ISA, d32) {
- ASSERT_TRUE(cpuinfo_isa.d32);
+TEST(ISA, vfpv3_d32) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3_d32());
}
-TEST(ISA, fp16) {
- ASSERT_TRUE(cpuinfo_isa.fp16);
+TEST(ISA, vfpv3_fp16) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3_fp16());
}
-TEST(ISA, fma) {
- ASSERT_TRUE(cpuinfo_isa.fma);
+TEST(ISA, vfpv3_fp16_d32) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3_fp16_d32());
+}
+
+TEST(ISA, vfpv4) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv4());
+}
+
+TEST(ISA, vfpv4_d32) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv4_d32());
}
TEST(ISA, wmmx) {
- ASSERT_FALSE(cpuinfo_isa.wmmx);
+ ASSERT_FALSE(cpuinfo_has_arm_wmmx());
}
TEST(ISA, wmmx2) {
- ASSERT_FALSE(cpuinfo_isa.wmmx2);
+ ASSERT_FALSE(cpuinfo_has_arm_wmmx2());
}
TEST(ISA, neon) {
- ASSERT_TRUE(cpuinfo_isa.neon);
+ ASSERT_TRUE(cpuinfo_has_arm_neon());
+}
+
+TEST(ISA, neon_fp16) {
+ ASSERT_TRUE(cpuinfo_has_arm_neon_fp16());
+}
+
+TEST(ISA, neon_fma) {
+ ASSERT_TRUE(cpuinfo_has_arm_neon_fma());
+}
+
+TEST(ISA, atomics) {
+ ASSERT_FALSE(cpuinfo_has_arm_atomics());
+}
+
+TEST(ISA, neon_rdm) {
+ ASSERT_FALSE(cpuinfo_has_arm_neon_rdm());
+}
+
+TEST(ISA, fp16_arith) {
+ ASSERT_FALSE(cpuinfo_has_arm_fp16_arith());
+}
+
+TEST(ISA, jscvt) {
+ ASSERT_FALSE(cpuinfo_has_arm_jscvt());
+}
+
+TEST(ISA, fcma) {
+ ASSERT_FALSE(cpuinfo_has_arm_fcma());
}
TEST(ISA, aes) {
- ASSERT_FALSE(cpuinfo_isa.aes);
+ ASSERT_FALSE(cpuinfo_has_arm_aes());
}
TEST(ISA, sha1) {
- ASSERT_FALSE(cpuinfo_isa.sha1);
+ ASSERT_FALSE(cpuinfo_has_arm_sha1());
}
TEST(ISA, sha2) {
- ASSERT_FALSE(cpuinfo_isa.sha2);
+ ASSERT_FALSE(cpuinfo_has_arm_sha2());
}
TEST(ISA, pmull) {
- ASSERT_FALSE(cpuinfo_isa.pmull);
+ ASSERT_FALSE(cpuinfo_has_arm_pmull());
}
TEST(ISA, crc32) {
- ASSERT_FALSE(cpuinfo_isa.crc32);
+ ASSERT_FALSE(cpuinfo_has_arm_crc32());
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(5, l1i.count);
+ ASSERT_EQ(5, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(5, l1d.count);
+ ASSERT_EQ(5, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(128 * 1024, l2.instances[k].size);
+ ASSERT_EQ(128 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(1, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <xiaomi-redmi-2a.h>
@@ -474,4 +473,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+} \ No newline at end of file
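
The ISA hunks above show the other half of the refactoring: reads of cpuinfo_isa struct fields become per-feature predicate functions (cpuinfo_has_arm_neon(), cpuinfo_has_arm_crc32(), and so on). A minimal sketch of gating a code path on the new predicates, assuming nothing beyond the functions exercised in these tests:

	#include <cpuinfo.h>

	#include <cstdio>

	int main() {
		cpuinfo_initialize();
		// Feature checks are now free functions rather than reads of cpuinfo_isa fields.
		if (cpuinfo_has_arm_neon() && cpuinfo_has_arm_neon_fma()) {
			std::printf("using NEON + NEON-FMA kernels\n");
		} else {
			std::printf("using scalar fallback\n");
		}
		return 0;
	}
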
diff --git a/test/xperia-c4-dual.cc b/test/xperia-c4-dual.cc
index 07c5e63..e638765 100644
--- a/test/xperia-c4-dual.cc
+++ b/test/xperia-c4-dual.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, DISABLED_linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6752",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
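The CORES hunks above assert per-core vendor, uarch, and MIDR through the new indexed accessor. A hedged sketch of the same enumeration outside the test harness, assuming an ARM build of the library (where the core record carries midr rather than the x86 cpuid seen in the zenfone-2 test below):
#include <cstdint>
#include <cstdio>
#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
		const cpuinfo_core* core = cpuinfo_get_core(i);
		/* vendor and uarch are enum values; midr is the raw ARM Main ID Register. */
		printf("core %u: vendor=%d uarch=%d midr=0x%08X\n",
			i, (int) core->vendor, (int) core->uarch, core->midr);
	}
	return 0;
}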
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <xperia-c4-dual.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
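Beyond the cache hunks, the PROCESSORS tests above encode this SoC's topology invariants (two Cortex-A53 clusters, one logical processor per core, a single package). A minimal consistency check in the new API -- a sketch assuming only the accessors and fields shown in this diff; the one-processor-per-core mapping is specific to the devices these tests cover:
#include <cstdint>
#include <cassert>
#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const cpuinfo_processor* proc = cpuinfo_get_processor(i);
		/* On these devices each core hosts exactly one processor, all in package 0. */
		assert(proc->core == cpuinfo_get_core(i));
		assert(proc->package == cpuinfo_get_package(0));
		/* A processor must fall inside the span its L2 instance reports serving. */
		if (proc->cache.l2 != nullptr) {
			assert(i >= proc->cache.l2->processor_start);
			assert(i < proc->cache.l2->processor_start + proc->cache.l2->processor_count);
		}
	}
	return 0;
}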
diff --git a/test/zenfone-2.cc b/test/zenfone-2.cc
index 7ef23aa..8a7f824 100644
--- a/test/zenfone-2.cc
+++ b/test/zenfone-2.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[i / 2], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i / 2), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, cpuid) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x000506A0), cpuinfo_cores[i].cpuid);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x000506A0), cpuinfo_get_core(i)->cpuid);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Atom Z3580",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -364,8 +364,8 @@ TEST(ISA, movbe) {
ASSERT_TRUE(cpuinfo_has_x86_movbe());
}
-TEST(ISA, lahf_salf) {
- ASSERT_TRUE(cpuinfo_has_x86_lahf_salf());
+TEST(ISA, lahf_sahf) {
+ ASSERT_TRUE(cpuinfo_has_x86_lahf_sahf());
}
TEST(ISA, lzcnt) {
@@ -413,198 +413,169 @@ TEST(ISA, sha) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(8, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(6, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(CPUINFO_CACHE_UNIFIED, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(CPUINFO_CACHE_UNIFIED, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(k * 2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(i * 2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <zenfone-2.h>
@@ -615,4 +586,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
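The zenfone-2 (x86 Silvermont) hunks also rename the flag query from cpuinfo_has_x86_lahf_salf to cpuinfo_has_x86_lahf_sahf and move the package-name check to the indexed accessor. A short sketch of the same queries, assuming an x86 build; the printf formatting is illustrative only:
#include <cstring>
#include <cstdio>
#include <string>
#include <cpuinfo.h>

int main() {
	cpuinfo_initialize();
	/* The package name is a fixed-size buffer that need not be NUL-terminated,
	   hence the strnlen() bound the tests use. */
	const cpuinfo_package* pkg = cpuinfo_get_package(0);
	std::string name(pkg->name, strnlen(pkg->name, CPUINFO_PACKAGE_NAME_MAX));
	printf("package: %s, cores: %u, LAHF/SAHF: %s\n",
		name.c_str(), pkg->core_count,
		cpuinfo_has_x86_lahf_sahf() ? "yes" : "no");
	return 0;
}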