author     Marat Dukhan <marat@fb.com>    2017-09-26 18:35:52 -0700
committer  Marat Dukhan <marat@fb.com>    2017-09-26 18:35:52 -0700
commit     3040197bc3eb13796351e74a2e7a6f2bcc081752 (patch)
tree       2387e26e11be3307d931e4741d2f3ae559e0bb09
parent     2b24889379602c17d2448c891e25c76d2b039ffc (diff)
download   cpuinfo-3040197bc3eb13796351e74a2e7a6f2bcc081752.tar.gz
Major API refactoring
-rw-r--r--  README.md                                 |   6
-rw-r--r--  include/cpuinfo.h                         |  55
-rw-r--r--  jni/Android.mk                            |  10
-rw-r--r--  src/api.c                                 | 144
-rw-r--r--  src/api.h                                 |  16
-rw-r--r--  src/cache.c                               |  43
-rw-r--r--  src/linux/current.c                       |   5
-rw-r--r--  test/arm-cache.cc                         |   3
-rw-r--r--  test/blu-r1-hd.cc                         | 245
-rw-r--r--  test/galaxy-c9-pro.cc                     | 277
-rw-r--r--  test/galaxy-grand-prime-value-edition.cc  | 243
-rw-r--r--  test/galaxy-j7-tmobile.cc                 | 253
-rw-r--r--  test/galaxy-j7-uae.cc                     | 253
-rw-r--r--  test/galaxy-s3-us.cc                      | 245
-rw-r--r--  test/galaxy-s4-us.cc                      | 245
-rw-r--r--  test/galaxy-s5-global.cc                  | 273
-rw-r--r--  test/galaxy-s5-us.cc                      | 245
-rw-r--r--  test/galaxy-s6.cc                         | 277
-rw-r--r--  test/galaxy-s7-global.cc                  | 279
-rw-r--r--  test/galaxy-s7-us.cc                      | 259
-rw-r--r--  test/galaxy-s8-global.cc                  | 279
-rw-r--r--  test/galaxy-s8-us.cc                      | 281
-rw-r--r--  test/galaxy-tab-3-7.0.cc                  | 243
-rw-r--r--  test/galaxy-win-duos.cc                   | 243
-rw-r--r--  test/get-current.cc                       |  34
-rw-r--r--  test/huawei-mate-8.cc                     | 277
-rw-r--r--  test/huawei-p9-lite.cc                    | 253
-rw-r--r--  test/init.cc                              | 350
-rw-r--r--  test/lenovo-a6600-plus.cc                 | 243
-rw-r--r--  test/lenovo-vibe-x2.cc                    | 273
-rw-r--r--  test/lg-k10-eu.cc                         | 243
-rw-r--r--  test/meizu-pro-6s.cc                      | 289
-rw-r--r--  test/memo-pad-7.cc                        | 247
-rw-r--r--  test/moto-e-gen1.cc                       | 243
-rw-r--r--  test/moto-g-gen2.cc                       | 243
-rw-r--r--  test/moto-g-gen3.cc                       | 243
-rw-r--r--  test/nexus-s.cc                           | 239
-rw-r--r--  test/nexus4.cc                            | 243
-rw-r--r--  test/nexus5x.cc                           | 277
-rw-r--r--  test/nexus6.cc                            | 243
-rw-r--r--  test/nexus6p.cc                           | 277
-rw-r--r--  test/nexus9.cc                            | 243
-rw-r--r--  test/oppo-r9.cc                           | 253
-rw-r--r--  test/pixel-c.cc                           | 243
-rw-r--r--  test/pixel-xl.cc                          | 259
-rw-r--r--  test/pixel.cc                             | 259
-rw-r--r--  test/process.py                           |  16
-rw-r--r--  test/scaleway.cc                          | 147
-rw-r--r--  test/xiaomi-redmi-2a.cc                   | 347
-rw-r--r--  test/xperia-c4-dual.cc                    | 253
-rw-r--r--  test/zenfone-2.cc                         | 247
-rw-r--r--  tools/cache-info.c                        |  20
-rw-r--r--  tools/cpu-info.c                          |  32
-rw-r--r--  tools/gpu-dump.c                          | 450
-rw-r--r--  tools/isa-info.c                          | 219
55 files changed, 5515 insertions, 6112 deletions
diff --git a/README.md b/README.md
index 7bbde41..571fbe1 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ if (cpuinfo_has_x86_avx) {
Check if the thread runs on a Cortex-A53 core
```c
cpuinfo_initialize();
-if (cpuinfo_current_core()->uarch == cpuinfo_uarch_cortex_a53) {
+if (cpuinfo_get_current_core()->uarch == cpuinfo_uarch_cortex_a53) {
cortex_a53_implementation(arguments);
}
```
@@ -39,7 +39,7 @@ if (cpuinfo_current_core()->uarch == cpuinfo_uarch_cortex_a53) {
Get the size of level 1 data cache on the fastest core in the processor (e.g. big core in big.LITTLE ARM systems):
```c
cpuinfo_initialize();
-const size_t l1_size = cpuinfo_processors[0].l1d->size;
+const size_t l1_size = cpuinfo_get_processor(0)->l1d->size;
```
Pin thread to cores sharing L2 cache with the current core (Linux or Android)
@@ -47,7 +47,7 @@ Pin thread to cores sharing L2 cache with the current core (Linux or Android)
cpuinfo_initialize();
cpu_set_t cpu_set;
CPU_ZERO(&cpu_set);
-const struct cpuinfo_cache* current_l2 = cpuinfo_current_processor()->l2;
+const struct cpuinfo_cache* current_l2 = cpuinfo_get_current_processor()->l2;
for (uint32_t i = 0; i < current_l2->processor_count; i++) {
CPU_SET(cpuinfo_processors[current_l2->processor_start + i].linux_id, &cpu_set);
}
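The three README snippets above are updated independently; the sketch below combines them into one program against the refactored API. It is only an illustration: it assumes a Linux or Android target (`_GNU_SOURCE` for `sched_setaffinity`/`cpu_set_t`), and it reaches the per-processor caches through the nested `cache` member (`->cache.l1d`, `->cache.l2`), as the tests in this commit do, rather than the `->l1d`/`->l2` shorthand in the README excerpt.

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();

	/* Dispatch to a Cortex-A53-specific code path */
	if (cpuinfo_get_current_core()->uarch == cpuinfo_uarch_cortex_a53) {
		printf("running on a Cortex-A53 core\n");
	}

	/* L1 data cache of the fastest core (index 0, i.e. a big core on big.LITTLE) */
	printf("L1D size: %u bytes\n", cpuinfo_get_processor(0)->cache.l1d->size);

	/* Pin the calling thread to every processor sharing L2 with the current one */
	cpu_set_t cpu_set;
	CPU_ZERO(&cpu_set);
	const struct cpuinfo_cache* current_l2 = cpuinfo_get_current_processor()->cache.l2;
	for (uint32_t i = 0; i < current_l2->processor_count; i++) {
		/* linux_id maps a cpuinfo processor to the kernel CPU number */
		CPU_SET(cpuinfo_get_processor(current_l2->processor_start + i)->linux_id, &cpu_set);
	}
	sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set);
	return 0;
}
```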
diff --git a/include/cpuinfo.h b/include/cpuinfo.h
index e60f70e..1df6097 100644
--- a/include/cpuinfo.h
+++ b/include/cpuinfo.h
@@ -80,15 +80,6 @@
#define CPUINFO_CACHE_INCLUSIVE 0x00000002
#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004
-enum cpuinfo_cache_level {
- cpuinfo_cache_level_1i = 0,
- cpuinfo_cache_level_1d = 1,
- cpuinfo_cache_level_2 = 2,
- cpuinfo_cache_level_3 = 3,
- cpuinfo_cache_level_4 = 4,
- cpuinfo_cache_level_max = 5,
-};
-
struct cpuinfo_cache {
/** Cache size in bytes */
uint32_t size;
@@ -112,11 +103,6 @@ struct cpuinfo_cache {
uint32_t processor_count;
};
-struct cpuinfo_caches {
- uint32_t count;
- const struct cpuinfo_cache* instances;
-};
-
struct cpuinfo_trace_cache {
uint32_t uops;
uint32_t associativity;
@@ -1096,7 +1082,7 @@ static inline bool cpuinfo_has_x86_movbe(void) {
#endif
}
-static inline bool cpuinfo_has_x86_lahf_salf(void) {
+static inline bool cpuinfo_has_x86_lahf_sahf(void) {
#if CPUINFO_ARCH_X86
return true;
#elif CPUINFO_ARCH_X86_64
@@ -1503,22 +1489,35 @@ static inline bool cpuinfo_has_arm_crc32(void) {
#endif
}
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l1i_cache(void);
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l1d_cache(void);
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l2_cache(void);
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l3_cache(void);
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l4_cache(void);
+const struct cpuinfo_processor* cpuinfo_get_processors(void);
+const struct cpuinfo_core* cpuinfo_get_cores(void);
+const struct cpuinfo_package* cpuinfo_get_packages(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);
-extern struct cpuinfo_processor* cpuinfo_processors;
-extern struct cpuinfo_core* cpuinfo_cores;
-extern struct cpuinfo_package* cpuinfo_packages;
+const struct cpuinfo_processor* cpuinfo_get_processor(uint32_t index);
+const struct cpuinfo_core* cpuinfo_get_core(uint32_t index);
+const struct cpuinfo_package* cpuinfo_get_package(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index);
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);
-extern uint32_t cpuinfo_processors_count;
-extern uint32_t cpuinfo_cores_count;
-extern uint32_t cpuinfo_packages_count;
+uint32_t cpuinfo_get_processors_count(void);
+uint32_t cpuinfo_get_cores_count(void);
+uint32_t cpuinfo_get_packages_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
-const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_current_processor(void);
-const struct cpuinfo_core* CPUINFO_ABI cpuinfo_current_core(void);
+const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void);
+const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);
#ifdef __cplusplus
} /* extern "C" */
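In short, the header change removes `struct cpuinfo_caches` and the cache-level enum from the public API, turns the exported arrays and `*_count` globals into functions, and splits each bundled cache getter into a plural getter (pointer to the first descriptor), a per-index getter, and a count getter. A rough migration sketch for old callers, assuming `cpuinfo_initialize()` has already run (the function names below are illustrative, not from this commit):

```c
#include <cpuinfo.h>

/* Total L1D capacity across all cores, written against the new accessors.
 * Old equivalent: struct cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
 *                 for (uint32_t k = 0; k < l1d.count; k++) total += l1d.instances[k].size; */
uint32_t total_l1d_bytes(void) {
	uint32_t total = 0;
	for (uint32_t k = 0; k < cpuinfo_get_l1d_caches_count(); k++) {
		total += cpuinfo_get_l1d_cache(k)->size;
	}
	return total;
}

/* Old equivalent: iterate cpuinfo_cores[0 .. cpuinfo_cores_count - 1] directly. */
uint32_t count_cortex_a53_cores(void) {
	uint32_t count = 0;
	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
		if (cpuinfo_get_core(i)->uarch == cpuinfo_uarch_cortex_a53) {
			count += 1;
		}
	}
	return count;
}
```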
diff --git a/jni/Android.mk b/jni/Android.mk
index 0d02516..82979dc 100644
--- a/jni/Android.mk
+++ b/jni/Android.mk
@@ -3,7 +3,7 @@ LOCAL_PATH := $(call my-dir)/..
include $(CLEAR_VARS)
LOCAL_MODULE := cpuinfo
LOCAL_SRC_FILES := $(LOCAL_PATH)/src/init.c \
- $(LOCAL_PATH)/src/cache.c \
+ $(LOCAL_PATH)/src/api.c \
$(LOCAL_PATH)/src/log.c \
$(LOCAL_PATH)/src/gpu/gles2.c \
$(LOCAL_PATH)/src/linux/current.c \
@@ -54,7 +54,7 @@ include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := cpuinfo_mock
LOCAL_SRC_FILES := $(LOCAL_PATH)/src/init.c \
- $(LOCAL_PATH)/src/cache.c \
+ $(LOCAL_PATH)/src/api.c \
$(LOCAL_PATH)/src/log.c \
$(LOCAL_PATH)/src/gpu/gles2.c \
$(LOCAL_PATH)/src/linux/current.c \
@@ -168,6 +168,12 @@ LOCAL_C_INCLUDES := $(LOCAL_PATH)/test
LOCAL_STATIC_LIBRARIES := cpuinfo gtest
include $(BUILD_EXECUTABLE)
+include $(CLEAR_VARS)
+LOCAL_MODULE := get-current-test
+LOCAL_SRC_FILES := $(LOCAL_PATH)/test/get-current.cc
+LOCAL_C_INCLUDES := $(LOCAL_PATH)/test
+LOCAL_STATIC_LIBRARIES := cpuinfo gtest
+include $(BUILD_EXECUTABLE)
ifeq ($(TARGET_ARCH_ABI),$(filter $(TARGET_ARCH_ABI),armeabi armeabi-v7a))
diff --git a/src/api.c b/src/api.c
new file mode 100644
index 0000000..0917101
--- /dev/null
+++ b/src/api.c
@@ -0,0 +1,144 @@
+#include <stddef.h>
+
+#include <cpuinfo.h>
+#include <api.h>
+
+
+struct cpuinfo_processor* cpuinfo_processors;
+struct cpuinfo_core* cpuinfo_cores;
+struct cpuinfo_package* cpuinfo_packages;
+struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max] = { NULL };
+
+uint32_t cpuinfo_processors_count;
+uint32_t cpuinfo_cores_count;
+uint32_t cpuinfo_packages_count;
+uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max] = { 0 };
+
+
+const struct cpuinfo_processor* cpuinfo_get_processors(void) {
+ return cpuinfo_processors;
+}
+
+const struct cpuinfo_core* cpuinfo_get_cores(void) {
+ return cpuinfo_cores;
+}
+
+const struct cpuinfo_package* cpuinfo_get_packages(void) {
+ return cpuinfo_packages;
+}
+
+const struct cpuinfo_processor* cpuinfo_get_processor(uint32_t index) {
+ if (index < cpuinfo_processors_count) {
+ return cpuinfo_processors + index;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cpuinfo_core* cpuinfo_get_core(uint32_t index) {
+ if (index < cpuinfo_cores_count) {
+ return cpuinfo_cores + index;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cpuinfo_package* cpuinfo_get_package(uint32_t index) {
+ if (index < cpuinfo_packages_count) {
+ return cpuinfo_packages + index;
+ } else {
+ return NULL;
+ }
+}
+
+uint32_t cpuinfo_get_processors_count(void) {
+ return cpuinfo_processors_count;
+}
+
+uint32_t cpuinfo_get_cores_count(void) {
+ return cpuinfo_cores_count;
+}
+
+uint32_t cpuinfo_get_packages_count(void) {
+ return cpuinfo_packages_count;
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void) {
+ return cpuinfo_cache[cpuinfo_cache_level_1i];
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void) {
+ return cpuinfo_cache[cpuinfo_cache_level_1d];
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void) {
+ return cpuinfo_cache[cpuinfo_cache_level_2];
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void) {
+ return cpuinfo_cache[cpuinfo_cache_level_3];
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void) {
+ return cpuinfo_cache[cpuinfo_cache_level_4];
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index) {
+ if (index < cpuinfo_cache_count[cpuinfo_cache_level_1i]) {
+ return cpuinfo_cache[cpuinfo_cache_level_1i] + index;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index) {
+ if (index < cpuinfo_cache_count[cpuinfo_cache_level_1d]) {
+ return cpuinfo_cache[cpuinfo_cache_level_1d] + index;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index) {
+ if (index < cpuinfo_cache_count[cpuinfo_cache_level_2]) {
+ return cpuinfo_cache[cpuinfo_cache_level_2] + index;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index) {
+ if (index < cpuinfo_cache_count[cpuinfo_cache_level_3]) {
+ return cpuinfo_cache[cpuinfo_cache_level_3] + index;
+ } else {
+ return NULL;
+ }
+}
+
+const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index) {
+ if (index < cpuinfo_cache_count[cpuinfo_cache_level_4]) {
+ return cpuinfo_cache[cpuinfo_cache_level_4] + index;
+ } else {
+ return NULL;
+ }
+}
+
+uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void) {
+ return cpuinfo_cache_count[cpuinfo_cache_level_1i];
+}
+
+uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void) {
+ return cpuinfo_cache_count[cpuinfo_cache_level_1d];
+}
+
+uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void) {
+ return cpuinfo_cache_count[cpuinfo_cache_level_2];
+}
+
+uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void) {
+ return cpuinfo_cache_count[cpuinfo_cache_level_3];
+}
+
+uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void) {
+ return cpuinfo_cache_count[cpuinfo_cache_level_4];
+}
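One detail of api.c worth calling out: every per-index accessor bounds-checks against the matching count and returns `NULL` for an out-of-range index, while the plural getters simply hand back the underlying array pointer, which is `NULL` for cache levels that do not exist (the L3/L4 tests below rely on exactly that). A minimal defensive-caller sketch under this contract:

```c
#include <stdio.h>

#include <cpuinfo.h>

void describe_l3(void) {
	cpuinfo_initialize();

	/* NULL covers both "no L3 on this system" (count is 0, array pointer is
	 * NULL) and an out-of-range index. */
	const struct cpuinfo_cache* l3 = cpuinfo_get_l3_cache(0);
	if (l3 == NULL) {
		printf("no L3 cache reported\n");
	} else {
		printf("L3: %u bytes, %u-way, shared by %u processors\n",
			l3->size, l3->associativity, l3->processor_count);
	}
}
```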
diff --git a/src/api.h b/src/api.h
index f65d280..95bce59 100644
--- a/src/api.h
+++ b/src/api.h
@@ -5,9 +5,23 @@
#include <cpuinfo.h>
+enum cpuinfo_cache_level {
+ cpuinfo_cache_level_1i = 0,
+ cpuinfo_cache_level_1d = 1,
+ cpuinfo_cache_level_2 = 2,
+ cpuinfo_cache_level_3 = 3,
+ cpuinfo_cache_level_4 = 4,
+ cpuinfo_cache_level_max = 5,
+};
+
+extern struct cpuinfo_processor* cpuinfo_processors;
+extern struct cpuinfo_core* cpuinfo_cores;
+extern struct cpuinfo_package* cpuinfo_packages;
extern struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max];
-extern uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max];
extern uint32_t cpuinfo_processors_count;
+extern uint32_t cpuinfo_cores_count;
+extern uint32_t cpuinfo_packages_count;
+extern uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max];
void cpuinfo_x86_mach_init(void);
void cpuinfo_x86_linux_init(void);
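With the enum and the globals moved into this internal header, only code inside the library touches `cpuinfo_processors`, `cpuinfo_cache[...]` and friends directly; everything else goes through the accessors in api.c. The fragment below is a hypothetical illustration of that split; the helper name and signature are invented and not part of this commit:

```c
#include <api.h>

/* Hypothetical initialization fragment: the globals declared in api.h are
 * assigned inside the library, and external consumers only ever observe
 * them through the accessor functions defined in api.c. */
static void publish_topology(
	struct cpuinfo_processor* processors, uint32_t processors_count,
	struct cpuinfo_core* cores, uint32_t cores_count)
{
	cpuinfo_processors = processors;
	cpuinfo_processors_count = processors_count;
	cpuinfo_cores = cores;
	cpuinfo_cores_count = cores_count;
}
```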
diff --git a/src/cache.c b/src/cache.c
deleted file mode 100644
index a26b057..0000000
--- a/src/cache.c
+++ /dev/null
@@ -1,43 +0,0 @@
-#include <stddef.h>
-
-#include <cpuinfo.h>
-#include <api.h>
-
-
-struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max] = { NULL };
-uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max] = { 0 };
-
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l1i_cache(void) {
- return (struct cpuinfo_caches) {
- .count = cpuinfo_cache_count[cpuinfo_cache_level_1i],
- .instances = cpuinfo_cache[cpuinfo_cache_level_1i]
- };
-}
-
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l1d_cache(void) {
- return (struct cpuinfo_caches) {
- .count = cpuinfo_cache_count[cpuinfo_cache_level_1d],
- .instances = cpuinfo_cache[cpuinfo_cache_level_1d]
- };
-}
-
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l2_cache(void) {
- return (struct cpuinfo_caches) {
- .count = cpuinfo_cache_count[cpuinfo_cache_level_2],
- .instances = cpuinfo_cache[cpuinfo_cache_level_2]
- };
-}
-
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l3_cache(void) {
- return (struct cpuinfo_caches) {
- .count = cpuinfo_cache_count[cpuinfo_cache_level_3],
- .instances = cpuinfo_cache[cpuinfo_cache_level_3]
- };
-}
-
-struct cpuinfo_caches CPUINFO_ABI cpuinfo_get_l4_cache(void) {
- return (struct cpuinfo_caches) {
- .count = cpuinfo_cache_count[cpuinfo_cache_level_4],
- .instances = cpuinfo_cache[cpuinfo_cache_level_4]
- };
-}
diff --git a/src/linux/current.c b/src/linux/current.c
index 7fa0aab..7654e4d 100644
--- a/src/linux/current.c
+++ b/src/linux/current.c
@@ -7,6 +7,7 @@
#include <sched.h>
#include <cpuinfo.h>
+#include <api.h>
#include <linux/api.h>
@@ -14,7 +15,7 @@ const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map;
const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map;
-const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_current_processor(void) {
+const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void) {
const int cpu = sched_getcpu();
if (cpu >= 0) {
return cpuinfo_linux_cpu_to_processor_map[cpu];
@@ -23,7 +24,7 @@ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_current_processor(void) {
}
}
-const struct cpuinfo_core* CPUINFO_ABI cpuinfo_current_core(void) {
+const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void) {
const int cpu = sched_getcpu();
if (cpu >= 0) {
return cpuinfo_linux_cpu_to_core_map[cpu];
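The Linux implementations of the renamed lookups still resolve the calling CPU with `sched_getcpu()` and index the per-CPU maps, so a caller should be prepared for a `NULL` result. The sketch below assumes the fallback branch (not shown in this hunk) returns `NULL` when the syscall fails; the kernel function names in comments are placeholders:

```c
#include <cpuinfo.h>

/* Select a per-uarch code path at run time, falling back to a generic path
 * if the current core cannot be determined. */
void run_best_kernel(void) {
	cpuinfo_initialize();

	const struct cpuinfo_core* core = cpuinfo_get_current_core();
	if (core != NULL && core->uarch == cpuinfo_uarch_cortex_a53) {
		/* cortex_a53_kernel(); */
	} else {
		/* generic_kernel(); */
	}
}
```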
diff --git a/test/arm-cache.cc b/test/arm-cache.cc
index 4c517a2..5b730bf 100644
--- a/test/arm-cache.cc
+++ b/test/arm-cache.cc
@@ -926,5 +926,4 @@ TEST(NVIDIA, tegra_t210) {
EXPECT_EQ(48 * 1024, l1i.size);
EXPECT_EQ(32 * 1024, l1d.size);
EXPECT_EQ(2 * 1024 * 1024, l2.size);
-}
-
+}
\ No newline at end of file
diff --git a/test/blu-r1-hd.cc b/test/blu-r1-hd.cc
index 4b45c46..28cec5f 100644
--- a/test/blu-r1-hd.cc
+++ b/test/blu-r1-hd.cc
@@ -5,154 +5,154 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, non_null) {
- ASSERT_TRUE(cpuinfo_packages);
+ ASSERT_TRUE(cpuinfo_get_packages());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6735",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -277,198 +277,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <blu-r1-hd.h>
@@ -481,4 +452,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-c9-pro.cc b/test/galaxy-c9-pro.cc
index 92c2c65..53671ec 100644
--- a/test/galaxy-c9-pro.cc
+++ b/test/galaxy-c9-pro.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8976",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-c9-pro.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-grand-prime-value-edition.cc b/test/galaxy-grand-prime-value-edition.cc
index e063946..7c1c7c7 100644
--- a/test/galaxy-grand-prime-value-edition.cc
+++ b/test/galaxy-grand-prime-value-edition.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Spreadtrum SC7730SE",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-grand-prime-value-edition.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
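The test hunks above all apply the same mechanical rewrite: the `cpuinfo_caches` aggregates returned by `cpuinfo_get_l1i_cache()`/`cpuinfo_get_l1d_cache()`/`cpuinfo_get_l2_cache()` give way to a `*_caches_count()` query plus a per-index accessor returning a `const struct cpuinfo_cache*`. A minimal sketch of the new iteration pattern, using only the accessors exercised in these tests (the `main` scaffolding and the capacity total are illustrative, not part of the library or this patch):

```c
#include <stdint.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	/* New style: query the count, then fetch each cache descriptor by index. */
	uint32_t total_l1d = 0;
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		total_l1d += cpuinfo_get_l1d_cache(i)->size;
	}
	printf("L1D caches: %u, total capacity: %u bytes\n",
		(unsigned) cpuinfo_get_l1d_caches_count(),
		(unsigned) total_l1d);
	return 0;
}
```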
diff --git a/test/galaxy-j7-tmobile.cc b/test/galaxy-j7-tmobile.cc
index 4771d37..b8beced 100644
--- a/test/galaxy-j7-tmobile.cc
+++ b/test/galaxy-j7-tmobile.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 7580",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-j7-tmobile.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-j7-uae.cc b/test/galaxy-j7-uae.cc
index 1b7bfb0..067e053 100644
--- a/test/galaxy-j7-uae.cc
+++ b/test/galaxy-j7-uae.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 7580",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-j7-uae.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-s3-us.cc b/test/galaxy-s3-us.cc
index bd03b76..e4dd834 100644
--- a/test/galaxy-s3-us.cc
+++ b/test/galaxy-s3-us.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x511F04D4), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x511F04D4), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8960",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,200 +273,171 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
-	}
+	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+		ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s3-us.h>
@@ -479,4 +450,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
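The topology tests follow the same count-plus-index pattern for processors, cores, and packages: `cpuinfo_get_processors_count()` plus `cpuinfo_get_processor(i)`, with the returned descriptors still linking to their core and package. A hedged sketch of walking the topology through the new accessors (field names are taken from the assertions above; the printing loop itself is illustrative only and assumes a Linux target, where `linux_id` is available):

```c
#include <stdint.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	/* Walk logical processors and report the core and package each one belongs to. */
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* proc = cpuinfo_get_processor(i);
		printf("processor %u: linux_id=%u core_id=%u package=%.*s\n",
			(unsigned) i,
			(unsigned) proc->linux_id,
			(unsigned) proc->core->core_id,
			(int) CPUINFO_PACKAGE_NAME_MAX, proc->package->name);
	}
	return 0;
}
```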
diff --git a/test/galaxy-s4-us.cc b/test/galaxy-s4-us.cc
index ed9a383..38ab0e2 100644
--- a/test/galaxy-s4-us.cc
+++ b/test/galaxy-s4-us.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x511F06F0), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x511F06F0), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm APQ8064",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,200 +273,171 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+		switch (i) {
+			/* label the only expected L2 instance; without a case label the assertion below is dead code */
+			case 0:
+				ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s4-us.h>
@@ -479,4 +450,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
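The hunks above apply one mechanical substitution across the test suite: the removed globals (counters such as cpuinfo_processors_count and arrays such as cpuinfo_cores) give way to count getters plus per-index accessors. A minimal sketch of the new calling convention, assuming a build against this commit's header; the printed fields are only an illustration:

```c
#include <inttypes.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	/* Count getter + per-index accessor replace cpuinfo_cores_count / cpuinfo_cores[i]. */
	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
		const struct cpuinfo_core* core = cpuinfo_get_core(i);
		printf("core %" PRIu32 ": processors %" PRIu32 "-%" PRIu32 "\n",
			core->core_id, core->processor_start,
			core->processor_start + core->processor_count - 1);
	}
	return 0;
}
```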
diff --git a/test/galaxy-s5-global.cc b/test/galaxy-s5-global.cc
index 6c62e77..75da2e9 100644
--- a/test/galaxy-s5-global.cc
+++ b/test/galaxy-s5-global.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a15, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a15, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x412FC0F3), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x412FC0F3), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 5422",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -325,246 +325,217 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(16, l2.instances[k].associativity);
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
break;
case 1:
- ASSERT_EQ(8, l2.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
break;
}
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s5-global.h>
@@ -577,4 +548,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
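In the galaxy-s5-global hunk the processors are enumerated performance cluster first, so the cpuinfo index and the Linux CPU number differ by the cluster offset (the i + 4 / i - 4 cases above). When code starts from a Linux CPU number, one way to recover the matching descriptor is a linear scan over the new accessors. This is a sketch, not library code: linux_id is a Linux-only field, and the helper name is just for illustration.

```c
#include <stddef.h>
#include <stdint.h>

#include <cpuinfo.h>

/* Map a Linux CPU number back to the cpuinfo processor describing it,
 * or NULL if no processor reports that linux_id. */
static const struct cpuinfo_processor* processor_for_linux_id(uint32_t linux_id) {
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
		if (processor->linux_id == linux_id) {
			return processor;
		}
	}
	return NULL;
}
```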
diff --git a/test/galaxy-s5-us.cc b/test/galaxy-s5-us.cc
index a4f812a..992b93a 100644
--- a/test/galaxy-s5-us.cc
+++ b/test/galaxy-s5-us.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x512F06F1), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x512F06F1), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8974PRO-AC",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,200 +273,171 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+		switch (i) {
+			/* label the only expected L2 instance; without a case label the assertion below is dead code */
+			case 0:
+				ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s5-us.h>
@@ -479,4 +450,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
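Every sets test in these files checks the same geometry identity, size = sets * line_size * partitions * associativity. For the 2 MiB L2 asserted just above (8-way, 128-byte lines, one partition) that works out to 2097152 / (128 * 1 * 8) = 2048 sets. A small helper stating the inverse relation, offered as a sketch rather than part of the library:

```c
#include <stdint.h>

#include <cpuinfo.h>

/* Sets implied by the remaining cache-geometry fields:
 *   sets = size / (line_size * partitions * associativity) */
static uint32_t expected_sets(const struct cpuinfo_cache* cache) {
	return cache->size / (cache->line_size * cache->partitions * cache->associativity);
}
```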
diff --git a/test/galaxy-s6.cc b/test/galaxy-s6.cc
index 7fa1bf4..cb69b9d 100644
--- a/test/galaxy-s6.cc
+++ b/test/galaxy-s6.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x411FD070), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x411FD070), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 7420",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s6.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
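The hunks above show the cache side of the refactoring: the old `cpuinfo_get_l1d_cache()` call, which returned a `cpuinfo_caches` value with `.count` and `.instances` members, is split into `cpuinfo_get_l1d_caches_count()`, `cpuinfo_get_l1d_caches()`, and a per-index `cpuinfo_get_l1d_cache(i)` accessor (likewise for L1I, L2, L3 and L4). Below is a minimal sketch of the resulting enumeration pattern, assuming `<cpuinfo.h>` is the public header and that the per-index accessor returns a `const struct cpuinfo_cache*`, as the updated assertions imply; the printed format is illustrative only.

```c
#include <inttypes.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	/* New API: a count plus per-index accessors replace the old cpuinfo_caches struct */
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		const struct cpuinfo_cache* l1d = cpuinfo_get_l1d_cache(i);
		printf("L1D %" PRIu32 ": %" PRIu32 " KB, %" PRIu32 "-way, %" PRIu32 "-byte lines\n",
			i, l1d->size / 1024, l1d->associativity, l1d->line_size);
	}
	return 0;
}
```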
diff --git a/test/galaxy-s7-global.cc b/test/galaxy-s7-global.cc
index ab2ac35..18f91c3 100644
--- a/test/galaxy-s7-global.cc
+++ b/test/galaxy-s7-global.cc
@@ -5,215 +5,215 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_get_core(i)->vendor);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
break;
}
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x531F0011), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x531F0011), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 8890",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -366,265 +366,236 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(128, l1i.instances[k].line_size);
+ ASSERT_EQ(128, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(8, l1d.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s7-global.h>
@@ -637,4 +608,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
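The topology side follows the same pattern: direct indexing of the exported `cpuinfo_processors`, `cpuinfo_cores` and `cpuinfo_packages` arrays is replaced by `cpuinfo_get_processor(i)`, `cpuinfo_get_core(i)` and `cpuinfo_get_package(i)`, each paired with a `*_count()` function. A sketch of walking cores through the new accessors, assuming `cpuinfo_get_core()` returns a `const struct cpuinfo_core*`; the field names match the assertions above, while the output format is illustrative.

```c
#include <inttypes.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
		const struct cpuinfo_core* core = cpuinfo_get_core(i);
		/* Each core reports the contiguous range of logical processors it owns */
		printf("core %" PRIu32 ": processors [%" PRIu32 ", %" PRIu32 "), MIDR 0x%08" PRIX32 "\n",
			core->core_id,
			core->processor_start,
			core->processor_start + core->processor_count,
			core->midr);
	}
	return 0;
}
```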
diff --git a/test/galaxy-s7-us.cc b/test/galaxy-s7-us.cc
index c4950c3..de81b00 100644
--- a/test/galaxy-s7-us.cc
+++ b/test/galaxy-s7-us.cc
@@ -5,177 +5,177 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x511F2052), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x511F2052), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x511F2112), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x511F2112), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8996",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -328,213 +328,184 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(3, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s7-us.h>
@@ -547,4 +518,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
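Every `sets` test in these files asserts the same geometric invariant against the new accessors: size = sets × line_size × partitions × associativity. A small standalone check of that invariant over the L2 instances, written against the new API; the helper name and messages are illustrative, not part of this commit.

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#include <cpuinfo.h>

/* True when size == sets * line_size * partitions * associativity */
static bool cache_geometry_is_consistent(const struct cpuinfo_cache* cache) {
	const uint64_t product = (uint64_t) cache->sets * cache->line_size *
		cache->partitions * cache->associativity;
	return product == cache->size;
}

int main(void) {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
		if (!cache_geometry_is_consistent(cpuinfo_get_l2_cache(i))) {
			printf("L2 #%" PRIu32 " has inconsistent geometry\n", i);
			return 1;
		}
	}
	printf("all L2 geometries are consistent\n");
	return 0;
}
```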
diff --git a/test/galaxy-s8-global.cc b/test/galaxy-s8-global.cc
index eee7a86..a2bfc87 100644
--- a/test/galaxy-s8-global.cc
+++ b/test/galaxy-s8-global.cc
@@ -5,215 +5,215 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_samsung, cpuinfo_get_core(i)->vendor);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
break;
}
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_mongoose, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x534F0010), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x534F0010), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 8895",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -366,265 +366,236 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(128, l1i.instances[k].line_size);
+ ASSERT_EQ(128, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(8, l1d.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s8-global.h>
@@ -637,4 +608,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
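Taken together, the PROCESSORS/l2 and L2/processors tests pin down the cross-links in the new API: `cpuinfo_get_processor(i)->cache.l2` points at one of the descriptors returned by `cpuinfo_get_l2_cache(...)`, and that descriptor's `processor_start`/`processor_count` identify exactly the processors sharing it. A sketch that walks those links from the processor side; the output format is illustrative.

```c
#include <inttypes.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_cache* l2 = cpuinfo_get_processor(i)->cache.l2;
		if (l2 == NULL) {
			printf("processor %" PRIu32 ": no L2\n", i);
			continue;
		}
		/* The L2 descriptor names the contiguous processor range that shares it */
		printf("processor %" PRIu32 " shares a %" PRIu32 " KB L2 with processors [%" PRIu32 ", %" PRIu32 ")\n",
			i, l2->size / 1024,
			l2->processor_start,
			l2->processor_start + l2->processor_count);
	}
	return 0;
}
```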
diff --git a/test/galaxy-s8-us.cc b/test/galaxy-s8-us.cc
index c6a19dc..d636eca 100644
--- a/test/galaxy-s8-us.cc
+++ b/test/galaxy-s8-us.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a73, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x51AF8001), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x51AF8014), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8998",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,272 +353,243 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
}
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(16, l1d.instances[k].associativity);
+ ASSERT_EQ(16, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-s8-us.h>
@@ -631,4 +602,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-tab-3-7.0.cc b/test/galaxy-tab-3-7.0.cc
index ee5e9b1..ff606cf 100644
--- a/test/galaxy-tab-3-7.0.cc
+++ b/test/galaxy-tab-3-7.0.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a9, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x413FC090), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x413FC090), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Marvell PXA986",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(32, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-tab-3-7.0.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/galaxy-win-duos.cc b/test/galaxy-win-duos.cc
index c5d9704..b36973f 100644
--- a/test/galaxy-win-duos.cc
+++ b/test/galaxy-win-duos.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a5, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a5, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC051), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC051), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8625Q",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(32, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <galaxy-win-duos.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/get-current.cc b/test/get-current.cc
new file mode 100644
index 0000000..c9bc1b9
--- /dev/null
+++ b/test/get-current.cc
@@ -0,0 +1,34 @@
+#include <gtest/gtest.h>
+
+#include <cpuinfo.h>
+
+
+TEST(CURRENT_PROCESSOR, not_null) {
+ ASSERT_TRUE(cpuinfo_get_current_processor());
+}
+
+TEST(CURRENT_PROCESSOR, within_bounds) {
+ const struct cpuinfo_processor* current_processor = cpuinfo_get_current_processor();
+ const struct cpuinfo_processor* processors_begin = cpuinfo_get_processors();
+ const struct cpuinfo_processor* processors_end = processors_begin + cpuinfo_get_processors_count();
+ ASSERT_GE(current_processor, processors_begin);
+ ASSERT_LT(current_processor, processors_end);
+}
+
+TEST(CURRENT_CORE, not_null) {
+ ASSERT_TRUE(cpuinfo_get_current_core());
+}
+
+TEST(CURRENT_CORE, within_bounds) {
+ const struct cpuinfo_core* current_core = cpuinfo_get_current_core();
+ const struct cpuinfo_core* cores_begin = cpuinfo_get_cores();
+ const struct cpuinfo_core* cores_end = cores_begin + cpuinfo_get_cores_count();
+ ASSERT_GE(current_core, cores_begin);
+ ASSERT_LT(current_core, cores_end);
+}
+
+int main(int argc, char* argv[]) {
+ cpuinfo_initialize();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/test/huawei-mate-8.cc b/test/huawei-mate-8.cc
index 3b6544f..63e0f1e 100644
--- a/test/huawei-mate-8.cc
+++ b/test/huawei-mate-8.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD080), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("HiSilicon Kirin 950",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <huawei-mate-8.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
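The hunks above replace the old `cpuinfo_caches` aggregate (a `.count` field plus an `.instances[k]` array) with per-index getters such as `cpuinfo_get_l2_caches_count()` and `cpuinfo_get_l2_cache(i)`. A minimal sketch of the accessor pattern these tests now exercise; the `<cpuinfo.h>` include, the `main` wrapper, and the printed strings are illustrative rather than taken from the commit:

```c
#include <stdio.h>
#include <stdint.h>
#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	/* Old API: cpuinfo_caches l2 = cpuinfo_get_l2_cache(); ... l2.instances[k].size
	 * New API: one count getter plus one const struct cpuinfo_cache* per index. */
	for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
		const struct cpuinfo_cache* l2 = cpuinfo_get_l2_cache(i);
		printf("L2 #%u: %u KB, %u-way, shared by %u processors\n",
			(unsigned) i, (unsigned) (l2->size / 1024),
			(unsigned) l2->associativity, (unsigned) l2->processor_count);
	}
	return 0;
}
```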
diff --git a/test/huawei-p9-lite.cc b/test/huawei-p9-lite.cc
index ff2f1a9..c47237c 100644
--- a/test/huawei-p9-lite.cc
+++ b/test/huawei-p9-lite.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("HiSilicon Kirin 650",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <huawei-p9-lite.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
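The same refactoring applies to topology: the global arrays `cpuinfo_processors`, `cpuinfo_cores`, and `cpuinfo_packages` give way to `cpuinfo_get_processor(i)`, `cpuinfo_get_core(i)`, and `cpuinfo_get_package(i)`. A hedged sketch of walking the topology through the new getters, using only the struct fields visible in this diff (`name`, `core_count`, `processor_count`, `core_id`, `cache.l1d`); the output format is invented for illustration:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
		const struct cpuinfo_package* package = cpuinfo_get_package(i);
		printf("package: %.*s (%u cores, %u logical processors)\n",
			(int) strnlen(package->name, CPUINFO_PACKAGE_NAME_MAX), package->name,
			(unsigned) package->core_count, (unsigned) package->processor_count);
	}
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
		const struct cpuinfo_cache* l1d = processor->cache.l1d;
		/* Each logical processor points back at its core and per-level caches. */
		printf("processor %u -> core %u, L1D %u KB\n",
			(unsigned) i, (unsigned) processor->core->core_id,
			l1d ? (unsigned) (l1d->size / 1024) : 0u);
	}
	return 0;
}
```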
diff --git a/test/init.cc b/test/init.cc
index 67ed926..b8701da 100644
--- a/test/init.cc
+++ b/test/init.cc
@@ -4,480 +4,416 @@
TEST(PROCESSORS_COUNT, non_zero) {
- ASSERT_NE(0, cpuinfo_processors_count);
+ ASSERT_NE(0, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(CORES_COUNT, non_zero) {
- ASSERT_NE(0, cpuinfo_cores_count);
+ ASSERT_NE(0, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, known_vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_NE(cpuinfo_vendor_unknown, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_NE(cpuinfo_vendor_unknown, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, known_uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_NE(cpuinfo_uarch_unknown, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_NE(cpuinfo_uarch_unknown, cpuinfo_get_core(i)->uarch);
}
}
TEST(L1I, non_zero_count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_NE(0, l1i.count);
+ ASSERT_NE(0, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, valid_count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_LE(l1i.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l1i_caches_count(), cpuinfo_get_processors_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, non_zero_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, valid_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].associativity * l1i.instances[k].sets * l1i.instances[k].partitions * l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->associativity * cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, non_zero_associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, non_zero_partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, non_zero_line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, power_of_2_line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].line_size & (l1i.instances[k].line_size - 1));
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->line_size & (cpuinfo_get_l1i_cache(i)->line_size - 1));
}
}
TEST(L1I, valid_line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_GE(l1i.instances[k].line_size, 16);
- ASSERT_LE(l1i.instances[k].line_size, 128);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_GE(cpuinfo_get_l1i_cache(i)->line_size, 16);
+ ASSERT_LE(cpuinfo_get_l1i_cache(i)->line_size, 128);
}
}
TEST(L1I, valid_flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags & ~valid_flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags & ~valid_flags);
}
}
TEST(L1I, non_inclusive) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, l1i.instances[k].flags & CPUINFO_CACHE_INCLUSIVE);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l1i_cache(i)->flags & CPUINFO_CACHE_INCLUSIVE);
}
}
TEST(L1I, non_zero_processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_NE(0, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1I, valid_processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_LT(l1i.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l1i.instances[k].processor_start + l1i.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_LT(cpuinfo_get_l1i_cache(i)->processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l1i_cache(i)->processor_start + cpuinfo_get_l1i_cache(i)->processor_count, cpuinfo_get_processors_count());
}
}
TEST(L1D, non_zero_count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_NE(0, l1d.count);
+ ASSERT_NE(0, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, valid_count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_LE(l1d.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l1d_caches_count(), cpuinfo_get_processors_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, non_zero_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, valid_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].associativity * l1d.instances[k].sets * l1d.instances[k].partitions * l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->associativity * cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, non_zero_associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, non_zero_partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, non_zero_line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, power_of_2_line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].line_size & (l1d.instances[k].line_size - 1));
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->line_size & (cpuinfo_get_l1d_cache(i)->line_size - 1));
}
}
TEST(L1D, valid_line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_GE(l1d.instances[k].line_size, 16);
- ASSERT_LE(l1d.instances[k].line_size, 128);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_GE(cpuinfo_get_l1d_cache(i)->line_size, 16);
+ ASSERT_LE(cpuinfo_get_l1d_cache(i)->line_size, 128);
}
}
TEST(L1D, valid_flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags & ~valid_flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags & ~valid_flags);
}
}
TEST(L1D, non_inclusive) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, l1d.instances[k].flags & CPUINFO_CACHE_INCLUSIVE);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l1d_cache(i)->flags & CPUINFO_CACHE_INCLUSIVE);
}
}
TEST(L1D, non_zero_processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_NE(0, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L1D, valid_processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_LT(l1d.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l1d.instances[k].processor_start + l1d.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_LT(cpuinfo_get_l1d_cache(i)->processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l1d_cache(i)->processor_start + cpuinfo_get_l1d_cache(i)->processor_count, cpuinfo_get_processors_count());
}
}
TEST(L2, valid_count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_LE(l2.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l2_caches_count(), cpuinfo_get_processors_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- if (l2.count != 0) {
- ASSERT_TRUE(l2.instances);
+ if (cpuinfo_get_l2_caches_count() != 0) {
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
}
TEST(L2, non_zero_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, valid_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].associativity * l2.instances[k].sets * l2.instances[k].partitions * l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->associativity * cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, non_zero_associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, non_zero_partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, non_zero_line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, power_of_2_line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].line_size & (l2.instances[k].line_size - 1));
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->line_size & (cpuinfo_get_l2_cache(i)->line_size - 1));
}
}
TEST(L2, valid_line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_GE(l2.instances[k].line_size, 16);
- ASSERT_LE(l2.instances[k].line_size, 128);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_GE(cpuinfo_get_l2_cache(i)->line_size, 16);
+ ASSERT_LE(cpuinfo_get_l2_cache(i)->line_size, 128);
}
}
TEST(L2, valid_flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags & ~valid_flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags & ~valid_flags);
}
}
TEST(L2, non_zero_processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_NE(0, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_NE(0, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L2, valid_processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_LT(l2.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l2.instances[k].processor_start + l2.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_LT(cpuinfo_get_l2_cache(i)->processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l2_cache(i)->processor_start + cpuinfo_get_l2_cache(i)->processor_count, cpuinfo_get_processors_count());
}
}
TEST(L3, valid_count) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_LE(l3.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l3_caches_count(), cpuinfo_get_processors_count());
}
TEST(L3, non_null) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- if (l3.count != 0) {
- ASSERT_TRUE(l3.instances);
+ if (cpuinfo_get_l3_caches_count() != 0) {
+ ASSERT_TRUE(cpuinfo_get_l3_caches());
}
}
TEST(L3, non_zero_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].size);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].size);
}
}
TEST(L3, valid_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_EQ(l3.instances[k].size,
- l3.instances[k].associativity * l3.instances[k].sets * l3.instances[k].partitions * l3.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_EQ(cpuinfo_get_l3_caches()[k].size,
+ cpuinfo_get_l3_caches()[k].associativity * cpuinfo_get_l3_caches()[k].sets * cpuinfo_get_l3_caches()[k].partitions * cpuinfo_get_l3_caches()[k].line_size);
}
}
TEST(L3, non_zero_associativity) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].associativity);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].associativity);
}
}
TEST(L3, non_zero_partitions) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].partitions);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].partitions);
}
}
TEST(L3, non_zero_line_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].line_size);
}
}
TEST(L3, power_of_2_line_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_EQ(0, l3.instances[k].line_size & (l3.instances[k].line_size - 1));
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l3_caches()[k].line_size & (cpuinfo_get_l3_caches()[k].line_size - 1));
}
}
TEST(L3, valid_line_size) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_GE(l3.instances[k].line_size, 16);
- ASSERT_LE(l3.instances[k].line_size, 128);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_GE(cpuinfo_get_l3_caches()[k].line_size, 16);
+ ASSERT_LE(cpuinfo_get_l3_caches()[k].line_size, 128);
}
}
TEST(L3, valid_flags) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_EQ(0, l3.instances[k].flags & ~valid_flags);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l3_caches()[k].flags & ~valid_flags);
}
}
TEST(L3, non_zero_processors) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_NE(0, l3.instances[k].processor_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l3_caches()[k].processor_count);
}
}
TEST(L3, valid_processors) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- for (uint32_t k = 0; k < l3.count; k++) {
- ASSERT_LT(l3.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l3.instances[k].processor_start + l3.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l3_caches_count(); k++) {
+ ASSERT_LT(cpuinfo_get_l3_caches()[k].processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l3_caches()[k].processor_start + cpuinfo_get_l3_caches()[k].processor_count, cpuinfo_get_processors_count());
}
}
TEST(L4, valid_count) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_LE(l4.count, cpuinfo_processors_count);
+ ASSERT_LE(cpuinfo_get_l4_caches_count(), cpuinfo_get_processors_count());
}
TEST(L4, non_null) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- if (l4.count != 0) {
- ASSERT_TRUE(l4.instances);
+ if (cpuinfo_get_l4_caches_count() != 0) {
+ ASSERT_TRUE(cpuinfo_get_l4_caches());
}
}
TEST(L4, non_zero_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].size);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].size);
}
}
TEST(L4, valid_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_EQ(l4.instances[k].size,
- l4.instances[k].associativity * l4.instances[k].sets * l4.instances[k].partitions * l4.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_EQ(cpuinfo_get_l4_caches()[k].size,
+ cpuinfo_get_l4_caches()[k].associativity * cpuinfo_get_l4_caches()[k].sets * cpuinfo_get_l4_caches()[k].partitions * cpuinfo_get_l4_caches()[k].line_size);
}
}
TEST(L4, non_zero_associativity) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].associativity);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].associativity);
}
}
TEST(L4, non_zero_partitions) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].partitions);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].partitions);
}
}
TEST(L4, non_zero_line_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].line_size);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].line_size);
}
}
TEST(L4, power_of_2_line_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_EQ(0, l4.instances[k].line_size & (l4.instances[k].line_size - 1));
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l4_caches()[k].line_size & (cpuinfo_get_l4_caches()[k].line_size - 1));
}
}
TEST(L4, valid_line_size) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_GE(l4.instances[k].line_size, 16);
- ASSERT_LE(l4.instances[k].line_size, 128);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_GE(cpuinfo_get_l4_caches()[k].line_size, 16);
+ ASSERT_LE(cpuinfo_get_l4_caches()[k].line_size, 128);
}
}
TEST(L4, valid_flags) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
const uint32_t valid_flags = CPUINFO_CACHE_UNIFIED | CPUINFO_CACHE_INCLUSIVE | CPUINFO_CACHE_COMPLEX_INDEXING;
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_EQ(0, l4.instances[k].flags & ~valid_flags);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_EQ(0, cpuinfo_get_l4_caches()[k].flags & ~valid_flags);
}
}
TEST(L4, non_zero_processors) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_NE(0, l4.instances[k].processor_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_NE(0, cpuinfo_get_l4_caches()[k].processor_count);
}
}
TEST(L4, valid_processors) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- for (uint32_t k = 0; k < l4.count; k++) {
- ASSERT_LT(l4.instances[k].processor_start, cpuinfo_processors_count);
- ASSERT_LE(l4.instances[k].processor_start + l4.instances[k].processor_count, cpuinfo_processors_count);
+ for (uint32_t k = 0; k < cpuinfo_get_l4_caches_count(); k++) {
+ ASSERT_LT(cpuinfo_get_l4_caches()[k].processor_start, cpuinfo_get_processors_count());
+ ASSERT_LE(cpuinfo_get_l4_caches()[k].processor_start + cpuinfo_get_l4_caches()[k].processor_count, cpuinfo_get_processors_count());
}
}
@@ -485,4 +421,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
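test/init.cc keeps asserting the same geometry invariant at every cache level, size = associativity * sets * partitions * line_size, only now it reads each field through the level-specific getters. A small standalone sketch of that check for the L1 data caches, using plain assert() instead of the googletest macros in the suite; the helper name is made up for the example:

```c
#include <assert.h>
#include <stdint.h>
#include <cpuinfo.h>

/* Re-check the cache-geometry invariant from test/init.cc with the new getters. */
static void check_l1d_geometry(void) {
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		const struct cpuinfo_cache* l1d = cpuinfo_get_l1d_cache(i);
		assert(l1d->size ==
			l1d->associativity * l1d->sets * l1d->partitions * l1d->line_size);
		/* Line size must be a power of two between 16 and 128 bytes. */
		assert((l1d->line_size & (l1d->line_size - 1)) == 0);
		assert(l1d->line_size >= 16 && l1d->line_size <= 128);
	}
}

int main(void) {
	cpuinfo_initialize();
	check_l1d_geometry();
	return 0;
}
```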
diff --git a/test/lenovo-a6600-plus.cc b/test/lenovo-a6600-plus.cc
index 3482024..e71802d 100644
--- a/test/lenovo-a6600-plus.cc
+++ b/test/lenovo-a6600-plus.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6735P",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <lenovo-a6600-plus.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/lenovo-vibe-x2.cc b/test/lenovo-vibe-x2.cc
index 460e04e..6faae6c 100644
--- a/test/lenovo-vibe-x2.cc
+++ b/test/lenovo-vibe-x2.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a17, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a17, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x410FC0E0), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FC0E0), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6595",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -325,246 +325,217 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
break;
}
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(16, l2.instances[k].associativity);
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
break;
case 1:
- ASSERT_EQ(8, l2.instances[k].associativity);
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
break;
}
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <lenovo-vibe-x2.h>
@@ -577,4 +548,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/lg-k10-eu.cc b/test/lg-k10-eu.cc
index b21377d..e37e3b3 100644
--- a/test/lg-k10-eu.cc
+++ b/test/lg-k10-eu.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8916",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <lg-k10-eu.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/meizu-pro-6s.cc b/test/meizu-pro-6s.cc
index 68a0708..8ac0fa7 100644
--- a/test/meizu-pro-6s.cc
+++ b/test/meizu-pro-6s.cc
@@ -5,145 +5,145 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(10, cpuinfo_processors_count);
+ ASSERT_EQ(10, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 8, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 8, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 6:
case 7:
case 8:
case 9:
- ASSERT_EQ(i - 6, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 6, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
case 6:
case 7:
case 8:
case 9:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[2], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(2), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(10, cpuinfo_cores_count);
+ ASSERT_EQ(10, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a72, cpuinfo_get_core(i)->uarch);
break;
case 2:
case 3:
@@ -153,18 +153,18 @@ TEST(CORES, uarch) {
case 7:
case 8:
case 9:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x410FD081), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD081), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
@@ -174,45 +174,45 @@ TEST(CORES, midr) {
case 7:
case 8:
case 9:
- ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD034), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6797T",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(10, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(10, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(10, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(10, cpuinfo_get_package(i)->core_count);
}
}
@@ -365,22 +365,19 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(10, l1i.count);
+ ASSERT_EQ(10, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 2:
case 3:
@@ -390,19 +387,18 @@ TEST(L1I, size) {
case 7:
case 8:
case 9:
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 2:
case 3:
@@ -412,66 +408,58 @@ TEST(L1I, associativity) {
case 7:
case 8:
case 9:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(10, l1d.count);
+ ASSERT_EQ(10, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
case 2:
case 3:
@@ -481,19 +469,18 @@ TEST(L1D, size) {
case 7:
case 8:
case 9:
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
break;
}
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 2:
case 3:
@@ -503,148 +490,132 @@ TEST(L1D, associativity) {
case 7:
case 8:
case 9:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(3, l2.count);
+ ASSERT_EQ(3, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
case 2:
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
case 2:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 2:
- ASSERT_EQ(6, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(6, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <meizu-pro-6s.h>
@@ -657,4 +628,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/memo-pad-7.cc b/test/memo-pad-7.cc
index c5ac4a9..f73addb 100644
--- a/test/memo-pad-7.cc
+++ b/test/memo-pad-7.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[i / 2], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i / 2), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, cpuid) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x00030678), cpuinfo_cores[i].cpuid);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x00030678), cpuinfo_get_core(i)->cpuid);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Atom Z3745",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -364,8 +364,8 @@ TEST(ISA, movbe) {
ASSERT_TRUE(cpuinfo_has_x86_movbe());
}
-TEST(ISA, lahf_salf) {
- ASSERT_TRUE(cpuinfo_has_x86_lahf_salf());
+TEST(ISA, lahf_sahf) {
+ ASSERT_TRUE(cpuinfo_has_x86_lahf_sahf());
}
TEST(ISA, lzcnt) {
@@ -413,198 +413,169 @@ TEST(ISA, sha) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(8, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(6, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(CPUINFO_CACHE_UNIFIED, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(CPUINFO_CACHE_UNIFIED, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(k * 2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(i * 2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <memo-pad-7.h>
@@ -615,4 +586,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
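
The hunks above show the shape of the refactoring: exported arrays and counters (`cpuinfo_processors`, `cpuinfo_processors_count`, and friends) give way to accessor functions (`cpuinfo_get_processors_count()`, `cpuinfo_get_processor(i)`, and so on), and the tests dereference the returned pointers instead of indexing globals. A minimal sketch of the new calling convention, using only accessors and struct fields that appear in this diff; the printing is illustrative, not part of the library:

```c
#include <stdint.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	/* Walk logical processors through the new per-index accessor. */
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		const struct cpuinfo_processor* proc = cpuinfo_get_processor(i);
		printf("processor %u: smt id %u, core id %u\n",
			(unsigned int) i,
			(unsigned int) proc->smt_id,
			(unsigned int) proc->core->core_id);
	}
	/* Packages expose a name plus core/processor extents; the tests read
	 * the name with strnlen, so bound the print the same way. */
	for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
		const struct cpuinfo_package* package = cpuinfo_get_package(i);
		printf("package %u: %.*s (%u cores, %u processors)\n",
			(unsigned int) i,
			(int) CPUINFO_PACKAGE_NAME_MAX, package->name,
			(unsigned int) package->core_count,
			(unsigned int) package->processor_count);
	}
	return 0;
}
```
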
diff --git a/test/moto-e-gen1.cc b/test/moto-e-gen1.cc
index c1c94e5..9904aad 100644
--- a/test/moto-e-gen1.cc
+++ b/test/moto-e-gen1.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8610",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <moto-e-gen1.h>
@@ -474,4 +445,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
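
The cache hunks repeat the same transformation: the old `cpuinfo_caches` value with its `count`/`instances` pair becomes a per-level pair of accessors, `cpuinfo_get_l1d_caches_count()` and `cpuinfo_get_l1d_cache(i)` (and likewise for L1I/L2/L3/L4). The identity the `sets` tests keep asserting, size = sets × line_size × partitions × associativity, can be checked the same way outside the test harness; a sketch under that assumption, with an illustrative helper name:

```c
#include <stdbool.h>
#include <stdint.h>

#include <cpuinfo.h>

/* Check the geometry identity the L1D `sets` tests assert:
 * size == sets * line_size * partitions * associativity. */
static bool l1d_geometry_consistent(void) {
	for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
		const struct cpuinfo_cache* l1d = cpuinfo_get_l1d_cache(i);
		const uint32_t derived =
			l1d->sets * l1d->line_size * l1d->partitions * l1d->associativity;
		if (derived != l1d->size) {
			return false;
		}
	}
	return true;
}
```

As in the tests, this is only meaningful after `cpuinfo_initialize()` has run.
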
diff --git a/test/moto-g-gen2.cc b/test/moto-g-gen2.cc
index d5b1c86..da6b0ac 100644
--- a/test/moto-g-gen2.cc
+++ b/test/moto-g-gen2.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC073), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8226",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <moto-g-gen2.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
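
The package fields exercised above (`processor_start`/`processor_count`, `core_start`/`core_count`) are extents into the global processor and core lists, so under the new API a package's cores are reached by offsetting the per-index accessor. A small sketch along those lines; the printing is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#include <cpuinfo.h>

/* List each package's cores by following core_start/core_count into
 * cpuinfo_get_core(), as the PACKAGES tests above imply. */
static void print_package_cores(void) {
	for (uint32_t p = 0; p < cpuinfo_get_packages_count(); p++) {
		const struct cpuinfo_package* package = cpuinfo_get_package(p);
		for (uint32_t c = 0; c < package->core_count; c++) {
			const struct cpuinfo_core* core =
				cpuinfo_get_core(package->core_start + c);
			printf("package %u, core %u: %u processor(s) starting at %u\n",
				(unsigned int) p,
				(unsigned int) core->core_id,
				(unsigned int) core->processor_count,
				(unsigned int) core->processor_start);
		}
	}
}
```
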
diff --git a/test/moto-g-gen3.cc b/test/moto-g-gen3.cc
index e1265d2..a59312c 100644
--- a/test/moto-g-gen3.cc
+++ b/test/moto-g-gen3.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD030), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8916",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <moto-g-gen3.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/nexus-s.cc b/test/nexus-s.cc
index edfa77c..db68226 100644
--- a/test/nexus-s.cc
+++ b/test/nexus-s.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(1, cpuinfo_processors_count);
+ ASSERT_EQ(1, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(1, cpuinfo_cores_count);
+ ASSERT_EQ(1, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a8, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a8, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x412FC082), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x412FC082), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Samsung Exynos 3110",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(1, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(1, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,196 +273,167 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(1, l1i.count);
+ ASSERT_EQ(1, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(128, l1i.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l1i_cache(i)->sets);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(1, l1d.count);
+ ASSERT_EQ(1, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(128, l1d.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l1d_cache(i)->sets);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(1, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus-s.h>
@@ -475,4 +446,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
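These single-core assertions also show that each logical processor now exposes direct cache pointers (`cpuinfo_get_processor(i)->cache.l1d` matches `cpuinfo_get_l1d_cache(i)`), with higher levels left NULL when absent. A short sketch that reads per-processor L1D geometry through the processor object; the printing loop itself is illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
		/* cache.l3/cache.l4 may be NULL, as the L3/L4 "none" tests expect;
		 * check before dereferencing. */
		const struct cpuinfo_cache* l1d = cpuinfo_get_processor(i)->cache.l1d;
		if (l1d != NULL) {
			printf("processor %u: L1D %u bytes, %u-byte lines\n",
				i, l1d->size, l1d->line_size);
		}
	}
	return 0;
}
```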
diff --git a/test/nexus4.cc b/test/nexus4.cc
index 755a29d..22002f9 100644
--- a/test/nexus4.cc
+++ b/test/nexus4.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x510F06F2), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x510F06F2), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm APQ8064",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus4.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
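The package tests above lean on the `core_start`/`core_count` ranges, which is also how callers walk the topology with the new accessors. A hedged sketch under that assumption (the report format is invented for illustration; only the accessors and fields exercised by the tests are used):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	for (uint32_t p = 0; p < cpuinfo_get_packages_count(); p++) {
		printf("package %u: %.*s (%u cores)\n",
			p,
			(int) strnlen(cpuinfo_get_package(p)->name, CPUINFO_PACKAGE_NAME_MAX),
			cpuinfo_get_package(p)->name,
			cpuinfo_get_package(p)->core_count);
		/* Cores belonging to this package occupy the index range
		 * [core_start, core_start + core_count). */
		const uint32_t core_start = cpuinfo_get_package(p)->core_start;
		for (uint32_t c = 0; c < cpuinfo_get_package(p)->core_count; c++) {
			printf("  core %u: MIDR 0x%08" PRIX32 "\n",
				cpuinfo_get_core(core_start + c)->core_id,
				cpuinfo_get_core(core_start + c)->midr);
		}
	}
	return 0;
}
```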
diff --git a/test/nexus5x.cc b/test/nexus5x.cc
index 7082949..4c333e9 100644
--- a/test/nexus5x.cc
+++ b/test/nexus5x.cc
@@ -5,194 +5,194 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(6, cpuinfo_processors_count);
+ ASSERT_EQ(6, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(6, cpuinfo_cores_count);
+ ASSERT_EQ(6, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x411FD072), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x411FD072), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD033), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8992",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(6, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(6, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_package(i)->core_count);
}
}
@@ -345,253 +345,224 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(6, l1i.count);
+ ASSERT_EQ(6, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(6, l1d.count);
+ ASSERT_EQ(6, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 2:
case 3:
case 4:
case 5:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus5x.h>
@@ -604,4 +575,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
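Unlike the earlier files, this device reports two L2 instances with different sizes and processor ranges, so callers can no longer assume a single shared L2. One illustrative policy (not taken from the library or the tests) is to pick the largest instance, e.g. when sizing a cache-blocking parameter:

```c
#include <stdint.h>
#include <stdio.h>

#include <cpuinfo.h>

int main(void) {
	cpuinfo_initialize();
	if (cpuinfo_get_l2_caches_count() == 0) {
		return 0;
	}
	/* Find the L2 instance with the largest capacity; on this big.LITTLE
	 * part that is the big cluster's 1 MB cache. */
	uint32_t best = 0;
	for (uint32_t i = 1; i < cpuinfo_get_l2_caches_count(); i++) {
		if (cpuinfo_get_l2_cache(i)->size > cpuinfo_get_l2_cache(best)->size) {
			best = i;
		}
	}
	printf("largest L2: instance %u, %u bytes, processors [%u, %u)\n",
		best,
		cpuinfo_get_l2_cache(best)->size,
		cpuinfo_get_l2_cache(best)->processor_start,
		cpuinfo_get_l2_cache(best)->processor_start +
		cpuinfo_get_l2_cache(best)->processor_count);
	return 0;
}
```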
diff --git a/test/nexus6.cc b/test/nexus6.cc
index 7afa612..edbf91b 100644
--- a/test/nexus6.cc
+++ b/test/nexus6.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_krait, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x513F06F1), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x513F06F1), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm APQ8084",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -273,198 +273,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus6.h>
@@ -477,4 +448,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/nexus6p.cc b/test/nexus6p.cc
index 1a0fc0b..e356e14 100644
--- a/test/nexus6p.cc
+++ b/test/nexus6p.cc
@@ -5,202 +5,202 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
break;
}
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_get_core(i)->midr);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8994",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -353,259 +353,230 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
break;
}
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
break;
}
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
break;
}
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
break;
case 1:
- ASSERT_EQ(0, l2.instances[k].flags);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
break;
}
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus6p.h>
@@ -618,4 +589,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/nexus9.cc b/test/nexus9.cc
index 3e0a71c..abdfec7 100644
--- a/test/nexus9.cc
+++ b/test/nexus9.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(2, cpuinfo_processors_count);
+ ASSERT_EQ(2, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(2, cpuinfo_cores_count);
+ ASSERT_EQ(2, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_nvidia, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_nvidia, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_denver, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_denver, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x4E0F0000), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x4E0F0000), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("nVidia Tegra T132",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(2, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_package(i)->core_count);
}
}
@@ -301,198 +301,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(128 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(128 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <nexus9.h>
@@ -505,4 +476,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/oppo-r9.cc b/test/oppo-r9.cc
index 5228582..2d954b8 100644
--- a/test/oppo-r9.cc
+++ b/test/oppo-r9.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, DISABLED_linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6755",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <oppo-r9.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/pixel-c.cc b/test/pixel-c.cc
index 5d92cf6..28cf7f9 100644
--- a/test/pixel-c.cc
+++ b/test/pixel-c.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a57, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x411FD071), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("nVidia Tegra T210",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -301,198 +301,169 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(48 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(48 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(3, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(2, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(2 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(2 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(CPUINFO_CACHE_INCLUSIVE, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <pixel-c.h>
@@ -505,4 +476,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/pixel-xl.cc b/test/pixel-xl.cc
index 62b5e73..0c6d374 100644
--- a/test/pixel-xl.cc
+++ b/test/pixel-xl.cc
@@ -5,177 +5,177 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8996PRO-AB",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -328,213 +328,184 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(3, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <pixel-xl.h>
@@ -547,4 +518,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/pixel.cc b/test/pixel.cc
index 5b8dce3..2175c85 100644
--- a/test/pixel.cc
+++ b/test/pixel.cc
@@ -5,177 +5,177 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(i + 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 2, cpuinfo_get_processor(i)->linux_id);
break;
case 2:
case 3:
- ASSERT_EQ(i - 2, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 2, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_qualcomm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_kryo, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
switch (i) {
case 0:
case 1:
- ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2051), cpuinfo_get_core(i)->midr);
break;
case 2:
case 3:
- ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_cores[i].midr);
+ ASSERT_EQ(UINT32_C(0x512F2011), cpuinfo_get_core(i)->midr);
break;
}
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Qualcomm MSM8996PRO-AB",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -328,213 +328,184 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(3, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(3, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(1 * 1024 * 1024, l2.instances[k].size);
+ ASSERT_EQ(1 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(128, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <pixel.h>
@@ -547,4 +518,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/process.py b/test/process.py
new file mode 100644
index 0000000..18b6161
--- /dev/null
+++ b/test/process.py
@@ -0,0 +1,16 @@
+import re
+import sys
+
+for fn in sys.argv[1:]:
+ code = open(fn).read().splitlines()
+ new_code = []
+ for line in code:
+ if line.strip().startswith("cpuinfo_caches l"):
+ continue
+ if "switch (k)" in line:
+ line = line.replace("switch (k)", "switch (i)")
+ elif "ASSERT_EQ(k," in line:
+ line = line.replace("ASSERT_EQ(k,", "ASSERT_EQ(i,")
+ new_code.append(line)
+
+ open(fn, "w").write("\n".join(new_code))
diff --git a/test/scaleway.cc b/test/scaleway.cc
index c53facb..c794fcb 100644
--- a/test/scaleway.cc
+++ b/test/scaleway.cc
@@ -9,18 +9,18 @@ TEST(PROCESSORS, count) {
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, vendor) {
for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_cavium, cpuinfo_processors[i].vendor);
+ ASSERT_EQ(cpuinfo_vendor_cavium, cpuinfo_get_processors()[i].vendor);
}
}
TEST(PROCESSORS, uarch) {
for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_thunderx, cpuinfo_processors[i].uarch);
+ ASSERT_EQ(cpuinfo_uarch_thunderx, cpuinfo_get_processors()[i].uarch);
}
}
@@ -139,195 +139,166 @@ TEST(ISA, fcma) {
#endif /* CPUINFO_ARCH_ARM64 */
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(2, l1i.count);
+ ASSERT_EQ(2, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(78 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(78 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(4, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(312, l1i.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(312, cpuinfo_get_l1i_cache(i)->sets);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(2, l1d.count);
+ ASSERT_EQ(2, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(128, l1d.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(128, cpuinfo_get_l1d_cache(i)->sets);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(1, l2.count);
+ ASSERT_EQ(1, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16 * 1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(32768, l2.instances[k].sets);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(32768, cpuinfo_get_l2_cache(i)->sets);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <scaleway.h>
@@ -337,4 +308,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/xiaomi-redmi-2a.cc b/test/xiaomi-redmi-2a.cc
index 0176a95..eaf1a4e 100644
--- a/test/xiaomi-redmi-2a.cc
+++ b/test/xiaomi-redmi-2a.cc
@@ -5,463 +5,462 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(5, cpuinfo_processors_count);
+ ASSERT_EQ(5, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(5, cpuinfo_cores_count);
+ ASSERT_EQ(5, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a7, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FC075), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Leadcore LC1860",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(5, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(5, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(5, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(5, cpuinfo_get_package(i)->core_count);
}
}
TEST(ISA, thumb) {
- ASSERT_TRUE(cpuinfo_isa.thumb);
+ ASSERT_TRUE(cpuinfo_has_arm_thumb());
}
TEST(ISA, thumb2) {
- ASSERT_TRUE(cpuinfo_isa.thumb2);
-}
-
-TEST(ISA, thumbee) {
- ASSERT_FALSE(cpuinfo_isa.thumbee);
-}
-
-TEST(ISA, jazelle) {
- ASSERT_FALSE(cpuinfo_isa.jazelle);
+ ASSERT_TRUE(cpuinfo_has_arm_thumb2());
}
TEST(ISA, armv5e) {
- ASSERT_TRUE(cpuinfo_isa.armv5e);
+ ASSERT_TRUE(cpuinfo_has_arm_v5e());
}
TEST(ISA, armv6) {
- ASSERT_TRUE(cpuinfo_isa.armv6);
+ ASSERT_TRUE(cpuinfo_has_arm_v6());
}
TEST(ISA, armv6k) {
- ASSERT_TRUE(cpuinfo_isa.armv6k);
+ ASSERT_TRUE(cpuinfo_has_arm_v6k());
}
TEST(ISA, armv7) {
- ASSERT_TRUE(cpuinfo_isa.armv7);
+ ASSERT_TRUE(cpuinfo_has_arm_v7());
}
TEST(ISA, armv7mp) {
- ASSERT_TRUE(cpuinfo_isa.armv7mp);
+ ASSERT_TRUE(cpuinfo_has_arm_v7mp());
}
TEST(ISA, idiv) {
- ASSERT_TRUE(cpuinfo_isa.idiv);
+ ASSERT_TRUE(cpuinfo_has_arm_idiv());
}
TEST(ISA, vfpv2) {
- ASSERT_FALSE(cpuinfo_isa.vfpv2);
+ ASSERT_FALSE(cpuinfo_has_arm_vfpv2());
}
TEST(ISA, vfpv3) {
- ASSERT_TRUE(cpuinfo_isa.vfpv3);
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3());
}
-TEST(ISA, d32) {
- ASSERT_TRUE(cpuinfo_isa.d32);
+TEST(ISA, vfpv3_d32) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3_d32());
}
-TEST(ISA, fp16) {
- ASSERT_TRUE(cpuinfo_isa.fp16);
+TEST(ISA, vfpv3_fp16) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3_fp16());
}
-TEST(ISA, fma) {
- ASSERT_TRUE(cpuinfo_isa.fma);
+TEST(ISA, vfpv3_fp16_d32) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv3_fp16_d32());
+}
+
+TEST(ISA, vfpv4) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv4());
+}
+
+TEST(ISA, vfpv4_d32) {
+ ASSERT_TRUE(cpuinfo_has_arm_vfpv4_d32());
}
TEST(ISA, wmmx) {
- ASSERT_FALSE(cpuinfo_isa.wmmx);
+ ASSERT_FALSE(cpuinfo_has_arm_wmmx());
}
TEST(ISA, wmmx2) {
- ASSERT_FALSE(cpuinfo_isa.wmmx2);
+ ASSERT_FALSE(cpuinfo_has_arm_wmmx2());
}
TEST(ISA, neon) {
- ASSERT_TRUE(cpuinfo_isa.neon);
+ ASSERT_TRUE(cpuinfo_has_arm_neon());
+}
+
+TEST(ISA, neon_fp16) {
+ ASSERT_TRUE(cpuinfo_has_arm_neon_fp16());
+}
+
+TEST(ISA, neon_fma) {
+ ASSERT_TRUE(cpuinfo_has_arm_neon_fma());
+}
+
+TEST(ISA, atomics) {
+ ASSERT_FALSE(cpuinfo_has_arm_atomics());
+}
+
+TEST(ISA, neon_rdm) {
+ ASSERT_FALSE(cpuinfo_has_arm_neon_rdm());
+}
+
+TEST(ISA, fp16_arith) {
+ ASSERT_FALSE(cpuinfo_has_arm_fp16_arith());
+}
+
+TEST(ISA, jscvt) {
+ ASSERT_FALSE(cpuinfo_has_arm_jscvt());
+}
+
+TEST(ISA, fcma) {
+ ASSERT_FALSE(cpuinfo_has_arm_fcma());
}
TEST(ISA, aes) {
- ASSERT_FALSE(cpuinfo_isa.aes);
+ ASSERT_FALSE(cpuinfo_has_arm_aes());
}
TEST(ISA, sha1) {
- ASSERT_FALSE(cpuinfo_isa.sha1);
+ ASSERT_FALSE(cpuinfo_has_arm_sha1());
}
TEST(ISA, sha2) {
- ASSERT_FALSE(cpuinfo_isa.sha2);
+ ASSERT_FALSE(cpuinfo_has_arm_sha2());
}
TEST(ISA, pmull) {
- ASSERT_FALSE(cpuinfo_isa.pmull);
+ ASSERT_FALSE(cpuinfo_has_arm_pmull());
}
TEST(ISA, crc32) {
- ASSERT_FALSE(cpuinfo_isa.crc32);
+ ASSERT_FALSE(cpuinfo_has_arm_crc32());
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(5, l1i.count);
+ ASSERT_EQ(5, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(5, l1d.count);
+ ASSERT_EQ(5, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(32 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(512 * 1024, l2.instances[k].size);
+ ASSERT_EQ(512 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
case 1:
- ASSERT_EQ(128 * 1024, l2.instances[k].size);
+ ASSERT_EQ(128 * 1024, cpuinfo_get_l2_cache(i)->size);
break;
}
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(8, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(1, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <xiaomi-redmi-2a.h>
@@ -474,4 +473,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/xperia-c4-dual.cc b/test/xperia-c4-dual.cc
index 07c5e63..e638765 100644
--- a/test/xperia-c4-dual.cc
+++ b/test/xperia-c4-dual.cc
@@ -5,176 +5,176 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(8, cpuinfo_processors_count);
+ ASSERT_EQ(8, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, DISABLED_linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(i + 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i + 4, cpuinfo_get_processor(i)->linux_id);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(i - 4, cpuinfo_processors[i].linux_id);
+ ASSERT_EQ(i - 4, cpuinfo_get_processor(i)->linux_id);
break;
}
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
switch (i) {
case 0:
case 1:
case 2:
case 3:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[0], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(0), cpuinfo_get_processor(i)->cache.l2);
break;
case 4:
case 5:
case 6:
case 7:
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[1], cpuinfo_processors[i].cache.l2);
+ ASSERT_EQ(cpuinfo_get_l2_cache(1), cpuinfo_get_processor(i)->cache.l2);
break;
}
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(8, cpuinfo_cores_count);
+ ASSERT_EQ(8, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_arm, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_cortex_a53, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, midr) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_cores[i].midr);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x410FD032), cpuinfo_get_core(i)->midr);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("MediaTek MT6752",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(8, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_package(i)->core_count);
}
}
@@ -327,206 +327,177 @@ TEST(ISA, crc32) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(8, l1i.count);
+ ASSERT_EQ(8, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(16 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(2, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(2, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(8, l1d.count);
+ ASSERT_EQ(8, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(16 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(16 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(4, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(256 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(256 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(0, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- switch (k) {
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ switch (i) {
case 0:
- ASSERT_EQ(0, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(0, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
case 1:
- ASSERT_EQ(4, l2.instances[k].processor_start);
- ASSERT_EQ(4, l2.instances[k].processor_count);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(4, cpuinfo_get_l2_cache(i)->processor_count);
break;
}
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <xperia-c4-dual.h>
@@ -539,4 +510,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/zenfone-2.cc b/test/zenfone-2.cc
index 7ef23aa..8a7f824 100644
--- a/test/zenfone-2.cc
+++ b/test/zenfone-2.cc
@@ -5,150 +5,150 @@
TEST(PROCESSORS, count) {
- ASSERT_EQ(4, cpuinfo_processors_count);
+ ASSERT_EQ(4, cpuinfo_get_processors_count());
}
TEST(PROCESSORS, non_null) {
- ASSERT_TRUE(cpuinfo_processors);
+ ASSERT_TRUE(cpuinfo_get_processors());
}
TEST(PROCESSORS, smt_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(0, cpuinfo_processors[i].smt_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_processor(i)->smt_id);
}
}
TEST(PROCESSORS, core) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_cores[i], cpuinfo_processors[i].core);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_core(i), cpuinfo_get_processor(i)->core);
}
}
TEST(PROCESSORS, package) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_processors[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_processor(i)->package);
}
}
TEST(PROCESSORS, linux_id) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(i, cpuinfo_processors[i].linux_id);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_processor(i)->linux_id);
}
}
TEST(PROCESSORS, l1i) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1i_cache().instances[i], cpuinfo_processors[i].cache.l1i);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i), cpuinfo_get_processor(i)->cache.l1i);
}
}
TEST(PROCESSORS, l1d) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l1d_cache().instances[i], cpuinfo_processors[i].cache.l1d);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i), cpuinfo_get_processor(i)->cache.l1d);
}
}
TEST(PROCESSORS, l2) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_EQ(&cpuinfo_get_l2_cache().instances[i / 2], cpuinfo_processors[i].cache.l2);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i / 2), cpuinfo_get_processor(i)->cache.l2);
}
}
TEST(PROCESSORS, l3) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l3);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l3);
}
}
TEST(PROCESSORS, l4) {
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- ASSERT_FALSE(cpuinfo_processors[i].cache.l4);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ ASSERT_FALSE(cpuinfo_get_processor(i)->cache.l4);
}
}
TEST(CORES, count) {
- ASSERT_EQ(4, cpuinfo_cores_count);
+ ASSERT_EQ(4, cpuinfo_get_cores_count());
}
TEST(CORES, non_null) {
- ASSERT_TRUE(cpuinfo_cores);
+ ASSERT_TRUE(cpuinfo_get_cores());
}
TEST(CORES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->processor_start);
}
}
TEST(CORES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(1, cpuinfo_cores[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_core(i)->processor_count);
}
}
TEST(CORES, core_id) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(i, cpuinfo_cores[i].core_id);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_core(i)->core_id);
}
}
TEST(CORES, package) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(&cpuinfo_packages[0], cpuinfo_cores[i].package);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_package(0), cpuinfo_get_core(i)->package);
}
}
TEST(CORES, vendor) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_cores[i].vendor);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_vendor_intel, cpuinfo_get_core(i)->vendor);
}
}
TEST(CORES, uarch) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_cores[i].uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(cpuinfo_uarch_silvermont, cpuinfo_get_core(i)->uarch);
}
}
TEST(CORES, cpuid) {
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- ASSERT_EQ(UINT32_C(0x000506A0), cpuinfo_cores[i].cpuid);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ ASSERT_EQ(UINT32_C(0x000506A0), cpuinfo_get_core(i)->cpuid);
}
}
TEST(PACKAGES, count) {
- ASSERT_EQ(1, cpuinfo_packages_count);
+ ASSERT_EQ(1, cpuinfo_get_packages_count());
}
TEST(PACKAGES, name) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
ASSERT_EQ("Atom Z3580",
- std::string(cpuinfo_packages[i].name,
- strnlen(cpuinfo_packages[i].name, CPUINFO_PACKAGE_NAME_MAX)));
+ std::string(cpuinfo_get_package(i)->name,
+ strnlen(cpuinfo_get_package(i)->name, CPUINFO_PACKAGE_NAME_MAX)));
}
}
TEST(PACKAGES, processor_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->processor_start);
}
}
TEST(PACKAGES, processor_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->processor_count);
}
}
TEST(PACKAGES, core_start) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(0, cpuinfo_packages[i].core_start);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_package(i)->core_start);
}
}
TEST(PACKAGES, core_count) {
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- ASSERT_EQ(4, cpuinfo_packages[i].core_count);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ ASSERT_EQ(4, cpuinfo_get_package(i)->core_count);
}
}
@@ -364,8 +364,8 @@ TEST(ISA, movbe) {
ASSERT_TRUE(cpuinfo_has_x86_movbe());
}
-TEST(ISA, lahf_salf) {
- ASSERT_TRUE(cpuinfo_has_x86_lahf_salf());
+TEST(ISA, lahf_sahf) {
+ ASSERT_TRUE(cpuinfo_has_x86_lahf_sahf());
}
TEST(ISA, lzcnt) {
@@ -413,198 +413,169 @@ TEST(ISA, sha) {
}
TEST(L1I, count) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_EQ(4, l1i.count);
+ ASSERT_EQ(4, cpuinfo_get_l1i_caches_count());
}
TEST(L1I, non_null) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- ASSERT_TRUE(l1i.instances);
+ ASSERT_TRUE(cpuinfo_get_l1i_caches());
}
TEST(L1I, size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(32 * 1024, l1i.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(32 * 1024, cpuinfo_get_l1i_cache(i)->size);
}
}
TEST(L1I, associativity) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(8, l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(8, cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, sets) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(l1i.instances[k].size,
- l1i.instances[k].sets * l1i.instances[k].line_size * l1i.instances[k].partitions * l1i.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1i_cache(i)->size,
+ cpuinfo_get_l1i_cache(i)->sets * cpuinfo_get_l1i_cache(i)->line_size * cpuinfo_get_l1i_cache(i)->partitions * cpuinfo_get_l1i_cache(i)->associativity);
}
}
TEST(L1I, partitions) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(1, l1i.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->partitions);
}
}
TEST(L1I, line_size) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(64, l1i.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1i_cache(i)->line_size);
}
}
TEST(L1I, flags) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(0, l1i.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1i_cache(i)->flags);
}
}
TEST(L1I, processors) {
- cpuinfo_caches l1i = cpuinfo_get_l1i_cache();
- for (uint32_t k = 0; k < l1i.count; k++) {
- ASSERT_EQ(k, l1i.instances[k].processor_start);
- ASSERT_EQ(1, l1i.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1i_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1i_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1i_cache(i)->processor_count);
}
}
TEST(L1D, count) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_EQ(4, l1d.count);
+ ASSERT_EQ(4, cpuinfo_get_l1d_caches_count());
}
TEST(L1D, non_null) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- ASSERT_TRUE(l1d.instances);
+ ASSERT_TRUE(cpuinfo_get_l1d_caches());
}
TEST(L1D, size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(24 * 1024, l1d.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(24 * 1024, cpuinfo_get_l1d_cache(i)->size);
}
}
TEST(L1D, associativity) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(6, l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(6, cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, sets) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(l1d.instances[k].size,
- l1d.instances[k].sets * l1d.instances[k].line_size * l1d.instances[k].partitions * l1d.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l1d_cache(i)->size,
+ cpuinfo_get_l1d_cache(i)->sets * cpuinfo_get_l1d_cache(i)->line_size * cpuinfo_get_l1d_cache(i)->partitions * cpuinfo_get_l1d_cache(i)->associativity);
}
}
TEST(L1D, partitions) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(1, l1d.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->partitions);
}
}
TEST(L1D, line_size) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(64, l1d.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l1d_cache(i)->line_size);
}
}
TEST(L1D, flags) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(0, l1d.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(0, cpuinfo_get_l1d_cache(i)->flags);
}
}
TEST(L1D, processors) {
- cpuinfo_caches l1d = cpuinfo_get_l1d_cache();
- for (uint32_t k = 0; k < l1d.count; k++) {
- ASSERT_EQ(k, l1d.instances[k].processor_start);
- ASSERT_EQ(1, l1d.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l1d_caches_count(); i++) {
+ ASSERT_EQ(i, cpuinfo_get_l1d_cache(i)->processor_start);
+ ASSERT_EQ(1, cpuinfo_get_l1d_cache(i)->processor_count);
}
}
TEST(L2, count) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_EQ(2, l2.count);
+ ASSERT_EQ(2, cpuinfo_get_l2_caches_count());
}
TEST(L2, non_null) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- ASSERT_TRUE(l2.instances);
+ ASSERT_TRUE(cpuinfo_get_l2_caches());
}
TEST(L2, size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1024 * 1024, l2.instances[k].size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1024 * 1024, cpuinfo_get_l2_cache(i)->size);
}
}
TEST(L2, associativity) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(16, l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(16, cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, sets) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(l2.instances[k].size,
- l2.instances[k].sets * l2.instances[k].line_size * l2.instances[k].partitions * l2.instances[k].associativity);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(cpuinfo_get_l2_cache(i)->size,
+ cpuinfo_get_l2_cache(i)->sets * cpuinfo_get_l2_cache(i)->line_size * cpuinfo_get_l2_cache(i)->partitions * cpuinfo_get_l2_cache(i)->associativity);
}
}
TEST(L2, partitions) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(1, l2.instances[k].partitions);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(1, cpuinfo_get_l2_cache(i)->partitions);
}
}
TEST(L2, line_size) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(64, l2.instances[k].line_size);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(64, cpuinfo_get_l2_cache(i)->line_size);
}
}
TEST(L2, flags) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(CPUINFO_CACHE_UNIFIED, l2.instances[k].flags);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(CPUINFO_CACHE_UNIFIED, cpuinfo_get_l2_cache(i)->flags);
}
}
TEST(L2, processors) {
- cpuinfo_caches l2 = cpuinfo_get_l2_cache();
- for (uint32_t k = 0; k < l2.count; k++) {
- ASSERT_EQ(k * 2, l2.instances[k].processor_start);
- ASSERT_EQ(2, l2.instances[k].processor_count);
+ for (uint32_t i = 0; i < cpuinfo_get_l2_caches_count(); i++) {
+ ASSERT_EQ(i * 2, cpuinfo_get_l2_cache(i)->processor_start);
+ ASSERT_EQ(2, cpuinfo_get_l2_cache(i)->processor_count);
}
}
TEST(L3, none) {
- cpuinfo_caches l3 = cpuinfo_get_l3_cache();
- ASSERT_EQ(0, l3.count);
- ASSERT_FALSE(l3.instances);
+ ASSERT_EQ(0, cpuinfo_get_l3_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l3_caches());
}
TEST(L4, none) {
- cpuinfo_caches l4 = cpuinfo_get_l4_cache();
- ASSERT_EQ(0, l4.count);
- ASSERT_FALSE(l4.instances);
+ ASSERT_EQ(0, cpuinfo_get_l4_caches_count());
+ ASSERT_FALSE(cpuinfo_get_l4_caches());
}
#include <zenfone-2.h>
@@ -615,4 +586,4 @@ int main(int argc, char* argv[]) {
cpuinfo_initialize();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/tools/cache-info.c b/tools/cache-info.c
index aa24b17..cbe9e6e 100644
--- a/tools/cache-info.c
+++ b/tools/cache-info.c
@@ -56,19 +56,19 @@ void report_cache(
int main(int argc, char** argv) {
cpuinfo_initialize();
- if ((cpuinfo_get_l1i_cache().count != 0 && cpuinfo_get_l1i_cache().instances->flags & CPUINFO_CACHE_UNIFIED) == 0) {
- report_cache(cpuinfo_get_l1i_cache().count, cpuinfo_get_l1i_cache().instances, 1, "instruction");
+ if (cpuinfo_get_l1i_caches_count() != 0 && (cpuinfo_get_l1i_cache(0)->flags & CPUINFO_CACHE_UNIFIED) == 0) {
+ report_cache(cpuinfo_get_l1i_caches_count(), cpuinfo_get_l1i_cache(0), 1, "instruction");
}
- if (cpuinfo_get_l1d_cache().count != 0) {
- report_cache(cpuinfo_get_l1d_cache().count, cpuinfo_get_l1d_cache().instances, 1, "data");
+ if (cpuinfo_get_l1d_caches_count() != 0) {
+ report_cache(cpuinfo_get_l1d_caches_count(), cpuinfo_get_l1d_cache(0), 1, "data");
}
- if (cpuinfo_get_l2_cache().count != 0) {
- report_cache(cpuinfo_get_l2_cache().count, cpuinfo_get_l2_cache().instances, 2, "data");
+ if (cpuinfo_get_l2_caches_count() != 0) {
+ report_cache(cpuinfo_get_l2_caches_count(), cpuinfo_get_l2_cache(0), 2, "data");
}
- if (cpuinfo_get_l3_cache().count != 0) {
- report_cache(cpuinfo_get_l3_cache().count, cpuinfo_get_l3_cache().instances, 3, "data");
+ if (cpuinfo_get_l3_caches_count() != 0) {
+ report_cache(cpuinfo_get_l3_caches_count(), cpuinfo_get_l3_cache(0), 3, "data");
}
- if (cpuinfo_get_l4_cache().count != 0) {
- report_cache(cpuinfo_get_l4_cache().count, cpuinfo_get_l4_cache().instances, 4, "data");
+ if (cpuinfo_get_l4_caches_count() != 0) {
+ report_cache(cpuinfo_get_l4_caches_count(), cpuinfo_get_l4_cache(0), 4, "data");
}
}
diff --git a/tools/cpu-info.c b/tools/cpu-info.c
index 9d0efec..e6c2703 100644
--- a/tools/cpu-info.c
+++ b/tools/cpu-info.c
@@ -184,37 +184,35 @@ static const char* uarch_to_string(enum cpuinfo_uarch uarch) {
int main(int argc, char** argv) {
cpuinfo_initialize();
#ifdef __ANDROID__
- printf("SoC name: %s\n", cpuinfo_packages[0].name);
- printf("GPU name: %s\n", cpuinfo_packages[0].gpu_name);
+ printf("SoC name: %s\n", cpuinfo_get_package(0)->name);
+ printf("GPU name: %s\n", cpuinfo_get_package(0)->gpu_name);
#else
printf("Packages:\n");
- for (uint32_t i = 0; i < cpuinfo_packages_count; i++) {
- printf("\t%"PRIu32": %s\n", i, cpuinfo_packages[i].name);
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ printf("\t%"PRIu32": %s\n", i, cpuinfo_get_package(i)->name);
}
#endif
printf("Cores:\n");
- for (uint32_t i = 0; i < cpuinfo_cores_count; i++) {
- if (cpuinfo_cores[i].processor_count == 1) {
- printf("\t%"PRIu32": 1 processor (%"PRIu32")\n",
- i, cpuinfo_cores[i].processor_start);
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ const struct cpuinfo_core* core = cpuinfo_get_core(i);
+ if (core->processor_count == 1) {
+ printf("\t%"PRIu32": 1 processor (%"PRIu32")\n", i, core->processor_start);
} else {
printf("\t%"PRIu32": %"PRIu32" processors (%"PRIu32"-%"PRIu32")\n",
- i,
- cpuinfo_cores[i].processor_count,
- cpuinfo_cores[i].processor_start,
- cpuinfo_cores[i].processor_start + cpuinfo_cores[i].processor_count - 1);
+ i, core->processor_count, core->processor_start, core->processor_start + core->processor_count - 1);
}
}
printf("Logical processors:\n");
- for (uint32_t i = 0; i < cpuinfo_processors_count; i++) {
- const char* vendor_string = vendor_to_string(cpuinfo_processors[i].core->vendor);
- const char* uarch_string = uarch_to_string(cpuinfo_processors[i].core->uarch);
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
+ const char* vendor_string = vendor_to_string(processor->core->vendor);
+ const char* uarch_string = uarch_to_string(processor->core->uarch);
if (vendor_string == NULL) {
printf("\t%"PRIu32": vendor 0x%08"PRIx32" uarch 0x%08"PRIx32"\n",
- i, (uint32_t) cpuinfo_processors[i].core->vendor, (uint32_t) cpuinfo_processors[i].core->uarch);
+ i, (uint32_t) processor->core->vendor, (uint32_t) processor->core->uarch);
} else if (uarch_string == NULL) {
printf("\t%"PRIu32": %s uarch 0x%08"PRIx32"\n",
- i, vendor_string, (uint32_t) cpuinfo_processors[i].core->uarch);
+ i, vendor_string, (uint32_t) processor->core->uarch);
} else {
printf("\t%"PRIu32": %s %s\n", i, vendor_string, uarch_string);
}
diff --git a/tools/gpu-dump.c b/tools/gpu-dump.c
new file mode 100644
index 0000000..d7cfa9e
--- /dev/null
+++ b/tools/gpu-dump.c
@@ -0,0 +1,450 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <dlfcn.h>
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+
+
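+/* Number of elements in a fixed-size array */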
+#define COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
+
+
+struct egl_enum_item {
+ EGLint id;
+ const char* name;
+};
+
+struct egl_enum_item egl_enum_boolean[] = {
+ {
+ .id = EGL_TRUE,
+ .name = "EGL_TRUE",
+ },
+ {
+ .id = EGL_FALSE,
+ .name = "EGL_FALSE",
+ },
+};
+
+struct egl_enum_item egl_enum_caveat[] = {
+ {
+ .id = EGL_NONE,
+ .name = "EGL_NONE",
+ },
+ {
+ .id = EGL_SLOW_CONFIG,
+ .name = "EGL_SLOW_CONFIG",
+ },
+ {
+ .id = EGL_NON_CONFORMANT_CONFIG,
+ .name = "EGL_NON_CONFORMANT_CONFIG",
+ },
+};
+
+struct egl_enum_item egl_enum_transparency[] = {
+ {
+ .id = EGL_NONE,
+ .name = "EGL_NONE",
+ },
+ {
+ .id = EGL_TRANSPARENT_RGB,
+ .name = "EGL_TRANSPARENT_RGB",
+ },
+};
+
+struct egl_enum_item egl_enum_color_buffer[] = {
+ {
+ .id = EGL_RGB_BUFFER,
+ .name = "EGL_RGB_BUFFER",
+ },
+ {
+ .id = EGL_LUMINANCE_BUFFER,
+ .name = "EGL_LUMINANCE_BUFFER",
+ },
+};
+
+#ifndef EGL_OPENGL_ES3_BIT
+ #define EGL_OPENGL_ES3_BIT 0x40
+#endif
+
+struct egl_enum_item egl_enum_conformant[] = {
+ {
+ .id = EGL_OPENGL_BIT,
+ .name = "EGL_OPENGL_BIT",
+ },
+ {
+ .id = EGL_OPENGL_ES_BIT,
+ .name = "EGL_OPENGL_ES_BIT",
+ },
+ {
+ .id = EGL_OPENGL_ES2_BIT,
+ .name = "EGL_OPENGL_ES2_BIT",
+ },
+ {
+ .id = EGL_OPENGL_ES3_BIT,
+ .name = "EGL_OPENGL_ES3_BIT",
+ },
+ {
+ .id = EGL_OPENVG_BIT,
+ .name = "EGL_OPENVG_BIT",
+ },
+};
+
+struct egl_enum_item egl_enum_surface_type[] = {
+ {
+ .id = EGL_PBUFFER_BIT,
+ .name = "EGL_PBUFFER_BIT",
+ },
+ {
+ .id = EGL_PIXMAP_BIT,
+ .name = "EGL_PIXMAP_BIT",
+ },
+ {
+ .id = EGL_WINDOW_BIT,
+ .name = "EGL_WINDOW_BIT",
+ },
+ {
+ .id = EGL_VG_COLORSPACE_LINEAR_BIT,
+ .name = "EGL_VG_COLORSPACE_LINEAR_BIT",
+ },
+ {
+ .id = EGL_VG_ALPHA_FORMAT_PRE_BIT,
+ .name = "EGL_VG_ALPHA_FORMAT_PRE_BIT",
+ },
+ {
+ .id = EGL_MULTISAMPLE_RESOLVE_BOX_BIT,
+ .name = "EGL_MULTISAMPLE_RESOLVE_BOX_BIT",
+ },
+ {
+ .id = EGL_SWAP_BEHAVIOR_PRESERVED_BIT,
+ .name = "EGL_SWAP_BEHAVIOR_PRESERVED_BIT",
+ },
+};
+
+struct egl_enum_item egl_enum_renderable_type[] = {
+ {
+ .id = EGL_OPENGL_ES_BIT,
+ .name = "EGL_OPENGL_ES_BIT",
+ },
+ {
+ .id = EGL_OPENVG_BIT,
+ .name = "EGL_OPENVG_BIT",
+ },
+ {
+ .id = EGL_OPENGL_ES2_BIT,
+ .name = "EGL_OPENGL_ES2_BIT",
+ },
+ {
+ .id = EGL_OPENGL_BIT,
+ .name = "EGL_OPENGL_BIT",
+ },
+ {
+ .id = EGL_OPENGL_ES3_BIT,
+ .name = "EGL_OPENGL_ES3_BIT",
+ },
+};
+
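+/*
+ * Attribute descriptor: cardinality == 0 means a plain integer value,
+ * cardinality > 0 means an enumeration with that many entries in values[],
+ * cardinality < 0 means a bitfield decoded against -cardinality entries in values[].
+ */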
+struct egl_config_attribute {
+ EGLint id;
+ const char* name;
+ int32_t cardinality;
+ const struct egl_enum_item* values;
+};
+
+struct egl_config_attribute egl_config_attributes[] = {
+ {
+ .id = EGL_CONFIG_ID,
+ .name = "EGL_CONFIG_ID",
+ },
+ {
+ .id = EGL_CONFIG_CAVEAT,
+ .name = "EGL_CONFIG_CAVEAT",
+ .cardinality = COUNT_OF(egl_enum_caveat),
+ .values = egl_enum_caveat,
+ },
+ {
+ .id = EGL_LUMINANCE_SIZE,
+ .name = "EGL_LUMINANCE_SIZE",
+ },
+ {
+ .id = EGL_RED_SIZE,
+ .name = "EGL_RED_SIZE",
+ },
+ {
+ .id = EGL_GREEN_SIZE,
+ .name = "EGL_GREEN_SIZE",
+ },
+ {
+ .id = EGL_BLUE_SIZE,
+ .name = "EGL_BLUE_SIZE",
+ },
+ {
+ .id = EGL_ALPHA_SIZE,
+ .name = "EGL_ALPHA_SIZE",
+ },
+ {
+ .id = EGL_DEPTH_SIZE,
+ .name = "EGL_DEPTH_SIZE",
+ },
+ {
+ .id = EGL_STENCIL_SIZE,
+ .name = "EGL_STENCIL_SIZE",
+ },
+ {
+ .id = EGL_ALPHA_MASK_SIZE,
+ .name = "EGL_ALPHA_MASK_SIZE",
+ },
+ {
+ .id = EGL_BIND_TO_TEXTURE_RGB,
+ .name = "EGL_BIND_TO_TEXTURE_RGB",
+ .cardinality = COUNT_OF(egl_enum_boolean),
+ .values = egl_enum_boolean,
+ },
+ {
+ .id = EGL_BIND_TO_TEXTURE_RGBA,
+ .name = "EGL_BIND_TO_TEXTURE_RGBA",
+ .cardinality = COUNT_OF(egl_enum_boolean),
+ .values = egl_enum_boolean,
+ },
+ {
+ .id = EGL_MAX_PBUFFER_WIDTH,
+ .name = "EGL_MAX_PBUFFER_WIDTH",
+ },
+ {
+ .id = EGL_MAX_PBUFFER_HEIGHT,
+ .name = "EGL_MAX_PBUFFER_HEIGHT",
+ },
+ {
+ .id = EGL_MAX_PBUFFER_PIXELS,
+ .name = "EGL_MAX_PBUFFER_PIXELS",
+ },
+ {
+ .id = EGL_TRANSPARENT_RED_VALUE,
+ .name = "EGL_TRANSPARENT_RED_VALUE",
+ },
+ {
+ .id = EGL_TRANSPARENT_GREEN_VALUE,
+ .name = "EGL_TRANSPARENT_GREEN_VALUE",
+ },
+ {
+ .id = EGL_TRANSPARENT_BLUE_VALUE,
+ .name = "EGL_TRANSPARENT_BLUE_VALUE",
+ },
+ {
+ .id = EGL_SAMPLE_BUFFERS,
+ .name = "EGL_SAMPLE_BUFFERS",
+ },
+ {
+ .id = EGL_SAMPLES,
+ .name = "EGL_SAMPLES",
+ },
+ {
+ .id = EGL_LEVEL,
+ .name = "EGL_LEVEL",
+ },
+ {
+ .id = EGL_MAX_SWAP_INTERVAL,
+ .name = "EGL_MAX_SWAP_INTERVAL",
+ },
+ {
+ .id = EGL_MIN_SWAP_INTERVAL,
+ .name = "EGL_MIN_SWAP_INTERVAL",
+ },
+ {
+ .id = EGL_SURFACE_TYPE,
+ .name = "EGL_SURFACE_TYPE",
+ .cardinality = -(int32_t) COUNT_OF(egl_enum_surface_type),
+ .values = egl_enum_surface_type,
+ },
+ {
+ .id = EGL_RENDERABLE_TYPE,
+ .name = "EGL_RENDERABLE_TYPE",
+ .cardinality = -(int32_t) COUNT_OF(egl_enum_renderable_type),
+ .values = egl_enum_renderable_type,
+ },
+ {
+ .id = EGL_CONFORMANT,
+ .name = "EGL_CONFORMANT",
+ .cardinality = -(int32_t) COUNT_OF(egl_enum_conformant),
+ .values = egl_enum_conformant,
+ },
+ {
+ .id = EGL_TRANSPARENT_TYPE,
+ .name = "EGL_TRANSPARENT_TYPE",
+ .cardinality = COUNT_OF(egl_enum_transparency),
+ .values = egl_enum_transparency,
+ },
+ {
+ .id = EGL_COLOR_BUFFER_TYPE,
+ .name = "EGL_COLOR_BUFFER_TYPE",
+ .cardinality = COUNT_OF(egl_enum_color_buffer),
+ .values = egl_enum_color_buffer,
+ },
+};
+
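+/*
+ * Enumerate EGL framebuffer configurations and, after creating an off-screen
+ * OpenGL ES 2.0 context, print the GL_VENDOR / GL_RENDERER / GL_VERSION strings.
+ */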
+void report_gles_attributes(void) {
+ void* libEGL = NULL;
+ EGLConfig* configs = NULL;
+ EGLDisplay display = EGL_NO_DISPLAY;
+ EGLSurface surface = EGL_NO_SURFACE;
+ EGLContext context = EGL_NO_CONTEXT;
+ EGLBoolean egl_init_status = EGL_FALSE;
+ EGLBoolean egl_make_current_status = EGL_FALSE;
+ EGLBoolean egl_status;
+
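+ /* Take an extra reference to libEGL.so; it is released with dlclose() in the cleanup path. */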
+ libEGL = dlopen("libEGL.so", RTLD_LAZY | RTLD_LOCAL);
+
+ display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ if (display == EGL_NO_DISPLAY) {
+ fprintf(stderr, "failed to get default EGL display\n");
+ goto cleanup;
+ }
+
+ EGLint egl_major = 0, egl_minor = 0;
+ egl_init_status = eglInitialize(display, &egl_major, &egl_minor);
+ if (egl_init_status != EGL_TRUE) {
+ fprintf(stderr, "failed to initialize EGL display connection\n");
+ goto cleanup;
+ }
+ printf("initialized display connection with EGL %d.%d\n", (int) egl_major, (int) egl_minor);
+
+ EGLint configs_count = 0;
+ egl_status = eglGetConfigs(display, NULL, 0, &configs_count);
+ if (egl_status != EGL_TRUE) {
+ fprintf(stderr, "failed to get the number of EGL frame buffer configurations\n");
+ goto cleanup;
+ }
+
+ configs = (EGLConfig*) malloc(configs_count * sizeof(EGLConfig));
+ if (configs == NULL) {
+ fprintf(stderr, "failed to allocate %zu bytes for %d frame buffer configurations\n",
+ configs_count * sizeof(EGLConfig), configs_count);
+ goto cleanup;
+ }
+
+ egl_status = eglGetConfigs(display, configs, configs_count, &configs_count);
+ if (egl_status != EGL_TRUE || configs_count == 0) {
+ fprintf(stderr, "failed to get EGL frame buffer configurations\n");
+ goto cleanup;
+ }
+
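+ /* Decode every configuration attribute using the egl_config_attributes table above. */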
+ printf("EGL framebuffer configurations:\n");
+ for (EGLint i = 0; i < configs_count; i++) {
+ printf("\tConfiguration #%d:\n", (int) i);
+ for (size_t n = 0; n < COUNT_OF(egl_config_attributes); n++) {
+ EGLint value = 0;
+ egl_status = eglGetConfigAttrib(display, configs[i], egl_config_attributes[n].id, &value);
+ if (egl_config_attributes[n].cardinality == 0) {
+ printf("\t\t%s: %d\n", egl_config_attributes[n].name, (int) value);
+ } else if (egl_config_attributes[n].cardinality > 0) {
+ /* Enumeration */
+ bool known_value = false;
+ for (size_t k = 0; k < (size_t) egl_config_attributes[n].cardinality; k++) {
+ if (egl_config_attributes[n].values[k].id == value) {
+ printf("\t\t%s: %s\n", egl_config_attributes[n].name, egl_config_attributes[n].values[k].name);
+ known_value = true;
+ break;
+ }
+ }
+ if (!known_value) {
+ printf("\t\t%s: unknown (%d)\n", egl_config_attributes[n].name, value);
+ }
+ } else {
+ /* Bitfield */
+ printf("\t\t%s: ", egl_config_attributes[n].name);
+ if (value == 0) {
+ printf("none\n");
+ } else {
+ for (size_t k = 0; k < (size_t) -egl_config_attributes[n].cardinality; k++) {
+ if (egl_config_attributes[n].values[k].id & value) {
+ value &= ~egl_config_attributes[n].values[k].id;
+ if (value != 0) {
+ printf("%s | ", egl_config_attributes[n].values[k].name);
+ } else {
+ printf("%s\n", egl_config_attributes[n].values[k].name);
+ }
+ }
+ }
+ if (value != 0) {
+						printf("0x%08X\n", (unsigned int) value);
+ }
+ }
+ }
+ }
+ }
+
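+	/* pick a conformant OpenGL ES 2.0 configuration that supports pbuffer surfaces
+	 * and can be bound to an RGBA texture */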
+ EGLint const config_attributes[] = {
+ EGL_BIND_TO_TEXTURE_RGBA, EGL_TRUE,
+ EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL_CONFORMANT, EGL_OPENGL_ES2_BIT,
+ EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
+ EGL_NONE,
+ };
+ EGLConfig config = NULL;
+ EGLint config_count = 0;
+ egl_status = eglChooseConfig(display, config_attributes, &config, 1, &config_count);
+ if (egl_status != EGL_TRUE || config_count == 0 || config == NULL) {
+		fprintf(stderr, "failed to find an EGL frame buffer configuration that matches the required attributes\n");
+ goto cleanup;
+ }
+
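+	/* a 1x1 pbuffer is sufficient: the context is only used to query glGetString() values,
+	 * nothing is actually rendered */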
+ EGLint const surface_attributes[] = {
+ EGL_HEIGHT, 1,
+ EGL_WIDTH, 1,
+ EGL_TEXTURE_FORMAT, EGL_TEXTURE_RGBA,
+ EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
+ EGL_NONE,
+ };
+ surface = eglCreatePbufferSurface(display, config, surface_attributes);
+ if (surface == EGL_NO_SURFACE) {
+ fprintf(stderr, "failed to create PBuffer surface\n");
+ goto cleanup;
+ }
+
+ EGLint const context_attributes[] = {
+ EGL_CONTEXT_CLIENT_VERSION, 2,
+ EGL_NONE,
+ };
+ context = eglCreateContext(display, config, EGL_NO_CONTEXT, context_attributes);
+ if (context == EGL_NO_CONTEXT) {
+ fprintf(stderr, "failed to create OpenGL ES context\n");
+ goto cleanup;
+ }
+
+ egl_make_current_status = eglMakeCurrent(display, surface, surface, context);
+ if (egl_make_current_status != EGL_TRUE) {
+ fprintf(stderr, "failed to attach OpenGL ES rendering context\n");
+ goto cleanup;
+ }
+
+ printf("OpenGL ES Attributes:\n");
+ printf("\t%s: \"%s\"\n", "GL_VENDOR", glGetString(GL_VENDOR));
+ printf("\t%s: \"%s\"\n", "GL_RENDERER", glGetString(GL_RENDERER));
+ printf("\t%s: \"%s\"\n", "GL_VERSION", glGetString(GL_VERSION));
+ printf("\t%s: \"%s\"\n", "GL_SHADING_LANGUAGE_VERSION", glGetString(GL_SHADING_LANGUAGE_VERSION));
+ printf("\t%s: \"%s\"\n", "GL_EXTENSIONS", glGetString(GL_EXTENSIONS));
+
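+	/* teardown in reverse order of creation; every step is guarded, so the error paths
+	 * above can jump here at any point */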
+cleanup:
+ if (egl_make_current_status == EGL_TRUE) {
+ eglMakeCurrent(display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+ }
+ if (context != EGL_NO_CONTEXT) {
+ eglDestroyContext(display, context);
+ }
+ if (surface != EGL_NO_SURFACE) {
+ eglDestroySurface(display, surface);
+ }
+ if (egl_init_status == EGL_TRUE) {
+ eglTerminate(display);
+ }
+ free(configs);
+
+ if (libEGL != NULL) {
+ dlclose(libEGL);
+ }
+}
+
+int main(int argc, char** argv) {
+ report_gles_attributes();
+ return 0;
+}
diff --git a/tools/isa-info.c b/tools/isa-info.c
index a35fdf8..97d1e6d 100644
--- a/tools/isa-info.c
+++ b/tools/isa-info.c
@@ -9,164 +9,145 @@ int main(int argc, char** argv) {
printf("Scalar instructions:\n");
#if CPUINFO_ARCH_X86
- printf("\tx87 FPU: %s\n", cpuinfo_isa.fpu ? "yes" : "no");
- printf("\tCMOV: %s\n", cpuinfo_isa.cmov ? "yes" : "no");
+ printf("\tx87 FPU: %s\n", cpuinfo_has_x86_fpu() ? "yes" : "no");
+ printf("\tCMOV: %s\n", cpuinfo_has_x86_cmov() ? "yes" : "no");
#endif
-#if CPUINFO_ARCH_X86_64
- printf("\tLAHF/SAHF: %s\n", cpuinfo_isa.lahf_sahf ? "yes" : "no");
-#endif
- printf("\tLZCNT: %s\n", cpuinfo_isa.lzcnt ? "yes" : "no");
- printf("\tPOPCNT: %s\n", cpuinfo_isa.popcnt ? "yes" : "no");
- printf("\tTBM: %s\n", cpuinfo_isa.tbm ? "yes" : "no");
- printf("\tBMI: %s\n", cpuinfo_isa.bmi ? "yes" : "no");
- printf("\tBMI2: %s\n", cpuinfo_isa.bmi2 ? "yes" : "no");
- printf("\tADCX/ADOX: %s\n", cpuinfo_isa.adx ? "yes" : "no");
+ printf("\tLAHF/SAHF: %s\n", cpuinfo_has_x86_lahf_sahf() ? "yes" : "no");
+ printf("\tLZCNT: %s\n", cpuinfo_has_x86_lzcnt() ? "yes" : "no");
+ printf("\tPOPCNT: %s\n", cpuinfo_has_x86_popcnt() ? "yes" : "no");
+ printf("\tTBM: %s\n", cpuinfo_has_x86_tbm() ? "yes" : "no");
+ printf("\tBMI: %s\n", cpuinfo_has_x86_bmi() ? "yes" : "no");
+ printf("\tBMI2: %s\n", cpuinfo_has_x86_bmi2() ? "yes" : "no");
+ printf("\tADCX/ADOX: %s\n", cpuinfo_has_x86_adx() ? "yes" : "no");
printf("Memory instructions:\n");
- printf("\tMOVBE: %s\n", cpuinfo_isa.movbe ? "yes" : "no");
- printf("\tPREFETCH: %s\n", cpuinfo_isa.prefetch ? "yes" : "no");
- printf("\tPREFETCHW: %s\n", cpuinfo_isa.prefetchw ? "yes" : "no");
- printf("\tPREFETCHWT1: %s\n", cpuinfo_isa.prefetchwt1 ? "yes" : "no");
- printf("\tCLZERO: %s\n", cpuinfo_isa.clzero ? "yes" : "no");
+ printf("\tMOVBE: %s\n", cpuinfo_has_x86_movbe() ? "yes" : "no");
+ printf("\tPREFETCH: %s\n", cpuinfo_has_x86_prefetch() ? "yes" : "no");
+ printf("\tPREFETCHW: %s\n", cpuinfo_has_x86_prefetchw() ? "yes" : "no");
+ printf("\tPREFETCHWT1: %s\n", cpuinfo_has_x86_prefetchwt1() ? "yes" : "no");
+ printf("\tCLZERO: %s\n", cpuinfo_has_x86_clzero() ? "yes" : "no");
printf("SIMD extensions:\n");
-#if CPUINFO_ARCH_X86
- printf("\tMMX: %s\n", cpuinfo_isa.mmx ? "yes" : "no");
- printf("\tMMX+: %s\n", cpuinfo_isa.mmx_plus ? "yes" : "no");
- printf("\tEMMX: %s\n", cpuinfo_isa.emmx ? "yes" : "no");
-#endif
- printf("\t3dnow!: %s\n", cpuinfo_isa.three_d_now ? "yes" : "no");
- printf("\t3dnow!+: %s\n", cpuinfo_isa.three_d_now_plus ? "yes" : "no");
-#if CPUINFO_ARCH_X86
- printf("\t3dnow! Geode: %s\n", cpuinfo_isa.three_d_now_geode ? "yes" : "no");
- printf("\tDAZ: %s\n", cpuinfo_isa.daz ? "yes" : "no");
- printf("\tSSE: %s\n", cpuinfo_isa.sse ? "yes" : "no");
- printf("\tSSE2: %s\n", cpuinfo_isa.sse2 ? "yes" : "no");
-#endif
- printf("\tSSE3: %s\n", cpuinfo_isa.sse3 ? "yes" : "no");
- printf("\tSSSE3: %s\n", cpuinfo_isa.ssse3 ? "yes" : "no");
- printf("\tSSE4.1: %s\n", cpuinfo_isa.sse4_1 ? "yes" : "no");
- printf("\tSSE4.2: %s\n", cpuinfo_isa.sse4_2 ? "yes" : "no");
- printf("\tSSE4a: %s\n", cpuinfo_isa.sse4a ? "yes" : "no");
- printf("\tMisaligned SSE: %s\n", cpuinfo_isa.misaligned_sse ? "yes" : "no");
- printf("\tAVX: %s\n", cpuinfo_isa.avx ? "yes" : "no");
- printf("\tFMA3: %s\n", cpuinfo_isa.fma3 ? "yes" : "no");
- printf("\tFMA4: %s\n", cpuinfo_isa.fma4 ? "yes" : "no");
- printf("\tXOP: %s\n", cpuinfo_isa.xop ? "yes" : "no");
- printf("\tF16C: %s\n", cpuinfo_isa.f16c ? "yes" : "no");
- printf("\tAVX2: %s\n", cpuinfo_isa.avx2 ? "yes" : "no");
- printf("\tAVX512F: %s\n", cpuinfo_isa.avx512f ? "yes" : "no");
- printf("\tAVX512PF: %s\n", cpuinfo_isa.avx512pf ? "yes" : "no");
- printf("\tAVX512ER: %s\n", cpuinfo_isa.avx512er ? "yes" : "no");
- printf("\tAVX512CD: %s\n", cpuinfo_isa.avx512cd ? "yes" : "no");
- printf("\tAVX512DQ: %s\n", cpuinfo_isa.avx512dq ? "yes" : "no");
- printf("\tAVX512BW: %s\n", cpuinfo_isa.avx512bw ? "yes" : "no");
- printf("\tAVX512VL: %s\n", cpuinfo_isa.avx512vl ? "yes" : "no");
- printf("\tAVX512IFMA: %s\n", cpuinfo_isa.avx512ifma ? "yes" : "no");
- printf("\tAVX512VBMI: %s\n", cpuinfo_isa.avx512vbmi ? "yes" : "no");
- printf("\tAVX512VPOPCNTDQ: %s\n", cpuinfo_isa.avx512vpopcntdq ? "yes" : "no");
- printf("\tAVX512_4VNNIW: %s\n", cpuinfo_isa.avx512_4vnniw ? "yes" : "no");
- printf("\tAVX512_4FMAPS: %s\n", cpuinfo_isa.avx512_4fmaps ? "yes" : "no");
+ printf("\tMMX: %s\n", cpuinfo_has_x86_mmx() ? "yes" : "no");
+ printf("\tMMX+: %s\n", cpuinfo_has_x86_mmx_plus() ? "yes" : "no");
+ printf("\t3dnow!: %s\n", cpuinfo_has_x86_3dnow() ? "yes" : "no");
+ printf("\t3dnow!+: %s\n", cpuinfo_has_x86_3dnow_plus() ? "yes" : "no");
+ printf("\t3dnow! Geode: %s\n", cpuinfo_has_x86_3dnow_geode() ? "yes" : "no");
+ printf("\tDAZ: %s\n", cpuinfo_has_x86_daz() ? "yes" : "no");
+ printf("\tSSE: %s\n", cpuinfo_has_x86_sse() ? "yes" : "no");
+ printf("\tSSE2: %s\n", cpuinfo_has_x86_sse2() ? "yes" : "no");
+ printf("\tSSE3: %s\n", cpuinfo_has_x86_sse3() ? "yes" : "no");
+ printf("\tSSSE3: %s\n", cpuinfo_has_x86_ssse3() ? "yes" : "no");
+ printf("\tSSE4.1: %s\n", cpuinfo_has_x86_sse4_1() ? "yes" : "no");
+ printf("\tSSE4.2: %s\n", cpuinfo_has_x86_sse4_2() ? "yes" : "no");
+ printf("\tSSE4a: %s\n", cpuinfo_has_x86_sse4a() ? "yes" : "no");
+ printf("\tMisaligned SSE: %s\n", cpuinfo_has_x86_misaligned_sse() ? "yes" : "no");
+ printf("\tAVX: %s\n", cpuinfo_has_x86_avx() ? "yes" : "no");
+ printf("\tFMA3: %s\n", cpuinfo_has_x86_fma3() ? "yes" : "no");
+ printf("\tFMA4: %s\n", cpuinfo_has_x86_fma4() ? "yes" : "no");
+ printf("\tXOP: %s\n", cpuinfo_has_x86_xop() ? "yes" : "no");
+ printf("\tF16C: %s\n", cpuinfo_has_x86_f16c() ? "yes" : "no");
+ printf("\tAVX2: %s\n", cpuinfo_has_x86_avx2() ? "yes" : "no");
+ printf("\tAVX512F: %s\n", cpuinfo_has_x86_avx512f() ? "yes" : "no");
+ printf("\tAVX512PF: %s\n", cpuinfo_has_x86_avx512pf() ? "yes" : "no");
+ printf("\tAVX512ER: %s\n", cpuinfo_has_x86_avx512er() ? "yes" : "no");
+ printf("\tAVX512CD: %s\n", cpuinfo_has_x86_avx512cd() ? "yes" : "no");
+ printf("\tAVX512DQ: %s\n", cpuinfo_has_x86_avx512dq() ? "yes" : "no");
+ printf("\tAVX512BW: %s\n", cpuinfo_has_x86_avx512bw() ? "yes" : "no");
+ printf("\tAVX512VL: %s\n", cpuinfo_has_x86_avx512vl() ? "yes" : "no");
+ printf("\tAVX512IFMA: %s\n", cpuinfo_has_x86_avx512ifma() ? "yes" : "no");
+ printf("\tAVX512VBMI: %s\n", cpuinfo_has_x86_avx512vbmi() ? "yes" : "no");
+ printf("\tAVX512VPOPCNTDQ: %s\n", cpuinfo_has_x86_avx512vpopcntdq() ? "yes" : "no");
+ printf("\tAVX512_4VNNIW: %s\n", cpuinfo_has_x86_avx512_4vnniw() ? "yes" : "no");
+ printf("\tAVX512_4FMAPS: %s\n", cpuinfo_has_x86_avx512_4fmaps() ? "yes" : "no");
printf("Multi-threading extensions:\n");
- printf("\tMONITOR/MWAIT: %s\n", cpuinfo_isa.mwait ? "yes" : "no");
- printf("\tMONITORX/MWAITX: %s\n", cpuinfo_isa.mwaitx ? "yes" : "no");
+ printf("\tMONITOR/MWAIT: %s\n", cpuinfo_has_x86_mwait() ? "yes" : "no");
+ printf("\tMONITORX/MWAITX: %s\n", cpuinfo_has_x86_mwaitx() ? "yes" : "no");
#if CPUINFO_ARCH_X86
- printf("\tCMPXCHG8B: %s\n", cpuinfo_isa.cmpxchg8b ? "yes" : "no");
+ printf("\tCMPXCHG8B: %s\n", cpuinfo_has_x86_cmpxchg8b() ? "yes" : "no");
#endif
- printf("\tCMPXCHG16B: %s\n", cpuinfo_isa.cmpxchg16b ? "yes" : "no");
- printf("\tHLE: %s\n", cpuinfo_isa.hle ? "yes" : "no");
- printf("\tRTM: %s\n", cpuinfo_isa.rtm ? "yes" : "no");
- printf("\tXTEST: %s\n", cpuinfo_isa.xtest ? "yes" : "no");
- printf("\tRDPID: %s\n", cpuinfo_isa.rdpid ? "yes" : "no");
+ printf("\tCMPXCHG16B: %s\n", cpuinfo_has_x86_cmpxchg16b() ? "yes" : "no");
+ printf("\tHLE: %s\n", cpuinfo_has_x86_hle() ? "yes" : "no");
+ printf("\tRTM: %s\n", cpuinfo_has_x86_rtm() ? "yes" : "no");
+ printf("\tXTEST: %s\n", cpuinfo_has_x86_xtest() ? "yes" : "no");
+ printf("\tRDPID: %s\n", cpuinfo_has_x86_rdpid() ? "yes" : "no");
printf("Cryptography extensions:\n");
- printf("\tAES: %s\n", cpuinfo_isa.aes ? "yes" : "no");
- printf("\tPCLMULQDQ: %s\n", cpuinfo_isa.pclmulqdq ? "yes" : "no");
- printf("\tRDRAND: %s\n", cpuinfo_isa.rdrand ? "yes" : "no");
- printf("\tRDSEED: %s\n", cpuinfo_isa.rdseed ? "yes" : "no");
- printf("\tSHA: %s\n", cpuinfo_isa.sha ? "yes" : "no");
- printf("\tPadlock RNG: %s\n", cpuinfo_isa.rng ? "yes" : "no");
- printf("\tPadlock ACE: %s\n", cpuinfo_isa.ace ? "yes" : "no");
- printf("\tPadlock ACE 2: %s\n", cpuinfo_isa.ace2 ? "yes" : "no");
- printf("\tPadlock PHE: %s\n", cpuinfo_isa.phe ? "yes" : "no");
- printf("\tPadlock PMM: %s\n", cpuinfo_isa.pmm ? "yes" : "no");
+ printf("\tAES: %s\n", cpuinfo_has_x86_aes() ? "yes" : "no");
+ printf("\tPCLMULQDQ: %s\n", cpuinfo_has_x86_pclmulqdq() ? "yes" : "no");
+ printf("\tRDRAND: %s\n", cpuinfo_has_x86_rdrand() ? "yes" : "no");
+ printf("\tRDSEED: %s\n", cpuinfo_has_x86_rdseed() ? "yes" : "no");
+ printf("\tSHA: %s\n", cpuinfo_has_x86_sha() ? "yes" : "no");
printf("Profiling instructions:\n");
#if CPUINFO_ARCH_X86
- printf("\tRDTSC: %s\n", cpuinfo_isa.rdtsc ? "yes" : "no");
+ printf("\tRDTSC: %s\n", cpuinfo_has_x86_rdtsc() ? "yes" : "no");
#endif
- printf("\tRDTSCP: %s\n", cpuinfo_isa.rdtscp ? "yes" : "no");
- printf("\tLWP: %s\n", cpuinfo_isa.lwp ? "yes" : "no");
- printf("\tMPX: %s\n", cpuinfo_isa.mpx ? "yes" : "no");
+ printf("\tRDTSCP: %s\n", cpuinfo_has_x86_rdtscp() ? "yes" : "no");
+ printf("\tMPX: %s\n", cpuinfo_has_x86_mpx() ? "yes" : "no");
printf("System instructions:\n");
- printf("\tSYSENTER/SYSEXIT: %s\n", cpuinfo_isa.sysenter ? "yes" : "no");
-#if CPUINFO_ARCH_X86
- printf("\tSYSCALL/SYSRET: %s\n", cpuinfo_isa.syscall ? "yes" : "no");
-#endif
- printf("\tRDMSR/WRMSR: %s\n", cpuinfo_isa.msr ? "yes" : "no");
- printf("\tCLFLUSH: %s\n", cpuinfo_isa.clflush ? "yes" : "no");
- printf("\tCLFLUSHOPT: %s\n", cpuinfo_isa.clflushopt ? "yes" : "no");
- printf("\tCLWB: %s\n", cpuinfo_isa.clwb ? "yes" : "no");
- printf("\tFXSAVE/FXSTOR: %s\n", cpuinfo_isa.fxsave ? "yes" : "no");
- printf("\tXSAVE/XSTOR: %s\n", cpuinfo_isa.xsave ? "yes" : "no");
- printf("\tFS/GS Base: %s\n", cpuinfo_isa.fs_gs_base ? "yes" : "no");
+ printf("\tCLWB: %s\n", cpuinfo_has_x86_clwb() ? "yes" : "no");
+ printf("\tFXSAVE/FXSTOR: %s\n", cpuinfo_has_x86_fxsave() ? "yes" : "no");
+ printf("\tXSAVE/XSTOR: %s\n", cpuinfo_has_x86_xsave() ? "yes" : "no");
#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */
#if CPUINFO_ARCH_ARM
printf("Instruction sets:\n");
- printf("\tThumb: %s\n", cpuinfo_isa.thumb ? "yes" : "no");
- printf("\tThumb 2: %s\n", cpuinfo_isa.thumb2 ? "yes" : "no");
- printf("\tThumb EE: %s\n", cpuinfo_isa.thumbee ? "yes" : "no");
- printf("\tJazelle: %s\n", cpuinfo_isa.jazelle ? "yes" : "no");
- printf("\tARMv5E: %s\n", cpuinfo_isa.armv5e ? "yes" : "no");
- printf("\tARMv6: %s\n", cpuinfo_isa.armv6 ? "yes" : "no");
- printf("\tARMv6-K: %s\n", cpuinfo_isa.armv6k ? "yes" : "no");
- printf("\tARMv7: %s\n", cpuinfo_isa.armv7 ? "yes" : "no");
- printf("\tARMv7 MP: %s\n", cpuinfo_isa.armv7mp ? "yes" : "no");
- printf("\tIDIV: %s\n", cpuinfo_isa.idiv ? "yes" : "no");
+ printf("\tThumb: %s\n", cpuinfo_has_arm_thumb() ? "yes" : "no");
+ printf("\tThumb 2: %s\n", cpuinfo_has_arm_thumb2() ? "yes" : "no");
+ printf("\tARMv5E: %s\n", cpuinfo_has_arm_v5e() ? "yes" : "no");
+ printf("\tARMv6: %s\n", cpuinfo_has_arm_v6() ? "yes" : "no");
+ printf("\tARMv6-K: %s\n", cpuinfo_has_arm_v6k() ? "yes" : "no");
+ printf("\tARMv7: %s\n", cpuinfo_has_arm_v7() ? "yes" : "no");
+ printf("\tARMv7 MP: %s\n", cpuinfo_has_arm_v7mp() ? "yes" : "no");
+ printf("\tIDIV: %s\n", cpuinfo_has_arm_idiv() ? "yes" : "no");
printf("Floating-Point support:\n");
- printf("\tVFPv2: %s\n", cpuinfo_isa.vfpv2 ? "yes" : "no");
- printf("\tVFPv3: %s\n", cpuinfo_isa.vfpv3 ? "yes" : "no");
- printf("\tD32: %s\n", cpuinfo_isa.d32 ? "yes" : "no");
- printf("\tFP16: %s\n", cpuinfo_isa.fp16 ? "yes" : "no");
- printf("\tFMA: %s\n", cpuinfo_isa.fma ? "yes" : "no");
+ printf("\tVFPv2: %s\n", cpuinfo_has_arm_vfpv2() ? "yes" : "no");
+ printf("\tVFPv3: %s\n", cpuinfo_has_arm_vfpv3() ? "yes" : "no");
+ printf("\tVFPv3+D32: %s\n", cpuinfo_has_arm_vfpv3_d32() ? "yes" : "no");
+ printf("\tVFPv3+FP16: %s\n", cpuinfo_has_arm_vfpv3_fp16() ? "yes" : "no");
+ printf("\tVFPv3+FP16+D32: %s\n", cpuinfo_has_arm_vfpv3_fp16_d32() ? "yes" : "no");
+ printf("\tVFPv4: %s\n", cpuinfo_has_arm_vfpv4() ? "yes" : "no");
+ printf("\tVFPv4+D32: %s\n", cpuinfo_has_arm_vfpv4_d32() ? "yes" : "no");
printf("SIMD extensions:\n");
- printf("\tWMMX: %s\n", cpuinfo_isa.wmmx ? "yes" : "no");
- printf("\tWMMX 2: %s\n", cpuinfo_isa.wmmx2 ? "yes" : "no");
- printf("\tNEON: %s\n", cpuinfo_isa.neon ? "yes" : "no");
+ printf("\tWMMX: %s\n", cpuinfo_has_arm_wmmx() ? "yes" : "no");
+ printf("\tWMMX 2: %s\n", cpuinfo_has_arm_wmmx2() ? "yes" : "no");
+ printf("\tNEON: %s\n", cpuinfo_has_arm_neon() ? "yes" : "no");
+ printf("\tNEON-FP16: %s\n", cpuinfo_has_arm_neon_fp16() ? "yes" : "no");
+ printf("\tNEON-FMA: %s\n", cpuinfo_has_arm_neon_fma() ? "yes" : "no");
printf("Cryptography extensions:\n");
- printf("\tAES: %s\n", cpuinfo_isa.aes ? "yes" : "no");
- printf("\tSHA1: %s\n", cpuinfo_isa.sha1 ? "yes" : "no");
- printf("\tSHA2: %s\n", cpuinfo_isa.sha2 ? "yes" : "no");
- printf("\tPMULL: %s\n", cpuinfo_isa.pmull ? "yes" : "no");
- printf("\tCRC32: %s\n", cpuinfo_isa.crc32 ? "yes" : "no");
+ printf("\tAES: %s\n", cpuinfo_has_arm_aes() ? "yes" : "no");
+ printf("\tSHA1: %s\n", cpuinfo_has_arm_sha1() ? "yes" : "no");
+ printf("\tSHA2: %s\n", cpuinfo_has_arm_sha2() ? "yes" : "no");
+ printf("\tPMULL: %s\n", cpuinfo_has_arm_pmull() ? "yes" : "no");
+ printf("\tCRC32: %s\n", cpuinfo_has_arm_crc32() ? "yes" : "no");
#endif /* CPUINFO_ARCH_ARM */
#if CPUINFO_ARCH_ARM64
printf("Instruction sets:\n");
- printf("\tARM v8.1 atomics: %s\n", cpuinfo_isa.atomics ? "yes" : "no");
- printf("\tARM v8.1 SQRDMLxH: %s\n", cpuinfo_isa.rdm ? "yes" : "no");
- printf("\tARM v8.2 FP16 arithmetics: %s\n", cpuinfo_isa.fp16arith ? "yes" : "no");
- printf("\tARM v8.3 JS conversion: %s\n", cpuinfo_isa.jscvt ? "yes" : "no");
- printf("\tARM v8.3 complex FMA: %s\n", cpuinfo_isa.fcma ? "yes" : "no");
+ printf("\tARM v8.1 atomics: %s\n", cpuinfo_has_arm_atomics() ? "yes" : "no");
+ printf("\tARM v8.1 SQRDMLxH: %s\n", cpuinfo_has_arm_neon_rdm() ? "yes" : "no");
+	printf("\tARM v8.2 FP16 arithmetic: %s\n", cpuinfo_has_arm_fp16_arith() ? "yes" : "no");
+ printf("\tARM v8.3 JS conversion: %s\n", cpuinfo_has_arm_jscvt() ? "yes" : "no");
+ printf("\tARM v8.3 complex FMA: %s\n", cpuinfo_has_arm_fcma() ? "yes" : "no");
printf("Cryptography extensions:\n");
- printf("\tAES: %s\n", cpuinfo_isa.aes ? "yes" : "no");
- printf("\tSHA1: %s\n", cpuinfo_isa.sha1 ? "yes" : "no");
- printf("\tSHA2: %s\n", cpuinfo_isa.sha2 ? "yes" : "no");
- printf("\tPMULL: %s\n", cpuinfo_isa.pmull ? "yes" : "no");
- printf("\tCRC32: %s\n", cpuinfo_isa.crc32 ? "yes" : "no");
+ printf("\tAES: %s\n", cpuinfo_has_arm_aes() ? "yes" : "no");
+ printf("\tSHA1: %s\n", cpuinfo_has_arm_sha1() ? "yes" : "no");
+ printf("\tSHA2: %s\n", cpuinfo_has_arm_sha2() ? "yes" : "no");
+ printf("\tPMULL: %s\n", cpuinfo_has_arm_pmull() ? "yes" : "no");
+ printf("\tCRC32: %s\n", cpuinfo_has_arm_crc32() ? "yes" : "no");
#endif
}