aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--include/cpuinfo.h43
-rw-r--r--src/api.c18
-rw-r--r--src/api.h2
-rw-r--r--src/x86/linux/init.c36
-rw-r--r--src/x86/mach/init.c29
-rw-r--r--src/x86/windows/init.c30
-rw-r--r--test/init.cc300
7 files changed, 450 insertions, 8 deletions
diff --git a/include/cpuinfo.h b/include/cpuinfo.h
index bdfd0f0..b02104e 100644
--- a/include/cpuinfo.h
+++ b/include/cpuinfo.h
@@ -420,6 +420,8 @@ struct cpuinfo_processor {
uint32_t smt_id;
/** Core containing this logical processor */
const struct cpuinfo_core* core;
+ /** Cluster of cores containing this logical processor */
+ const struct cpuinfo_cluster* cluster;
/** Physical package containing this logical processor */
const struct cpuinfo_package* package;
#if defined(__linux__)
@@ -458,13 +460,15 @@ struct cpuinfo_processor {
};
struct cpuinfo_core {
- /** Index of the first logical processor on this core */
+ /** Index of the first logical processor on this core. */
uint32_t processor_start;
/** Number of logical processors on this core */
uint32_t processor_count;
/** Core ID within a package */
uint32_t core_id;
- /** Physical package containing this core */
+ /** Cluster containing this core */
+ const struct cpuinfo_cluster* cluster;
+ /** Physical package containing this core. */
const struct cpuinfo_package* package;
/** Vendor of the CPU microarchitecture for this core */
enum cpuinfo_vendor vendor;
@@ -481,6 +485,34 @@ struct cpuinfo_core {
uint64_t frequency;
};
+struct cpuinfo_cluster {
+ /** Index of the first logical processor in the cluster */
+ uint32_t processor_start;
+ /** Number of logical processors in the cluster */
+ uint32_t processor_count;
+ /** Index of the first core in the cluster */
+ uint32_t core_start;
+ /** Number of cores on the cluster */
+ uint32_t core_count;
+ /** Cluster ID within a package */
+ uint32_t cluster_id;
+ /** Physical package containing the cluster */
+ const struct cpuinfo_package* package;
+ /** CPU microarchitecture vendor of the cores in the cluster */
+ enum cpuinfo_vendor vendor;
+ /** CPU microarchitecture of the cores in the cluster */
+ enum cpuinfo_uarch uarch;
+#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+ /** Value of CPUID leaf 1 EAX register of the cores in the cluster */
+ uint32_t cpuid;
+#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ /** Value of Main ID Register (MIDR) of the cores in the cluster */
+ uint32_t midr;
+#endif
+ /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
+ uint64_t frequency;
+};
+
#define CPUINFO_PACKAGE_NAME_MAX 48
#define CPUINFO_GPU_NAME_MAX 64
@@ -499,6 +531,10 @@ struct cpuinfo_package {
uint32_t core_start;
/** Number of cores on this physical package */
uint32_t core_count;
+ /** Index of the first cluster of cores on this physical package */
+ uint32_t cluster_start;
+ /** Number of clusters of cores on this physical package */
+ uint32_t cluster_count;
};
#ifdef __cplusplus
@@ -1570,6 +1606,7 @@ static inline bool cpuinfo_has_arm_crc32(void) {
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
+const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
@@ -1579,6 +1616,7 @@ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index);
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
+const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
@@ -1588,6 +1626,7 @@ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);
uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
+uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
diff --git a/src/api.c b/src/api.c
index 1c7fd29..1ffdb8c 100644
--- a/src/api.c
+++ b/src/api.c
@@ -6,11 +6,13 @@
struct cpuinfo_processor* cpuinfo_processors = NULL;
struct cpuinfo_core* cpuinfo_cores = NULL;
+struct cpuinfo_cluster* cpuinfo_clusters = NULL;
struct cpuinfo_package* cpuinfo_packages = NULL;
struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max] = { NULL };
uint32_t cpuinfo_processors_count = 0;
uint32_t cpuinfo_cores_count = 0;
+uint32_t cpuinfo_clusters_count = 0;
uint32_t cpuinfo_packages_count = 0;
uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max] = { 0 };
@@ -23,6 +25,10 @@ const struct cpuinfo_core* cpuinfo_get_cores(void) {
return cpuinfo_cores;
}
+const struct cpuinfo_cluster* cpuinfo_get_clusters(void) {
+ return cpuinfo_clusters;
+}
+
const struct cpuinfo_package* cpuinfo_get_packages(void) {
return cpuinfo_packages;
}
@@ -43,6 +49,14 @@ const struct cpuinfo_core* cpuinfo_get_core(uint32_t index) {
}
}
+const struct cpuinfo_cluster* cpuinfo_get_cluster(uint32_t index) {
+ if (index < cpuinfo_clusters_count) {
+ return cpuinfo_clusters + index;
+ } else {
+ return NULL;
+ }
+}
+
const struct cpuinfo_package* cpuinfo_get_package(uint32_t index) {
if (index < cpuinfo_packages_count) {
return cpuinfo_packages + index;
@@ -59,6 +73,10 @@ uint32_t cpuinfo_get_cores_count(void) {
return cpuinfo_cores_count;
}
+uint32_t cpuinfo_get_clusters_count(void) {
+ return cpuinfo_clusters_count;
+}
+
uint32_t cpuinfo_get_packages_count(void) {
return cpuinfo_packages_count;
}
diff --git a/src/api.h b/src/api.h
index 49985ed..9ba406f 100644
--- a/src/api.h
+++ b/src/api.h
@@ -20,10 +20,12 @@ enum cpuinfo_cache_level {
extern struct cpuinfo_processor* cpuinfo_processors;
extern struct cpuinfo_core* cpuinfo_cores;
+extern struct cpuinfo_cluster* cpuinfo_clusters;
extern struct cpuinfo_package* cpuinfo_packages;
extern struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max];
extern uint32_t cpuinfo_processors_count;
extern uint32_t cpuinfo_cores_count;
+extern uint32_t cpuinfo_clusters_count;
extern uint32_t cpuinfo_packages_count;
extern uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max];
diff --git a/src/x86/linux/init.c b/src/x86/linux/init.c
index b54f533..3a219dd 100644
--- a/src/x86/linux/init.c
+++ b/src/x86/linux/init.c
@@ -131,6 +131,7 @@ void cpuinfo_x86_linux_init(void) {
struct cpuinfo_x86_linux_processor* x86_linux_processors = NULL;
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_core* cores = NULL;
+ struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_package* packages = NULL;
const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
@@ -234,6 +235,15 @@ void cpuinfo_x86_linux_init(void) {
cores_count * sizeof(struct cpuinfo_core), cores_count);
goto cleanup;
}
+
+ /* On x86 cluster of cores is a physical package */
+ clusters = calloc(packages_count, sizeof(struct cpuinfo_cluster));
+ if (clusters == NULL) {
+ cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters",
+ packages_count * sizeof(struct cpuinfo_cluster), packages_count);
+ goto cleanup;
+ }
+
packages = calloc(packages_count, sizeof(struct cpuinfo_package));
if (packages == NULL) {
cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages",
@@ -313,6 +323,7 @@ void cpuinfo_x86_linux_init(void) {
/* Initialize logical processor object */
processors[processor_index].smt_id = smt_id;
processors[processor_index].core = cores + core_index;
+ processors[processor_index].cluster = clusters + package_index;
processors[processor_index].package = packages + package_index;
processors[processor_index].linux_id = x86_linux_processors[i].linux_id;
processors[processor_index].apic_id = x86_linux_processors[i].apic_id;
@@ -323,11 +334,13 @@ void cpuinfo_x86_linux_init(void) {
.processor_start = processor_index,
.processor_count = 1,
.core_id = core_id,
+ .cluster = clusters + package_index,
.package = packages + package_index,
.vendor = x86_processor.vendor,
.uarch = x86_processor.uarch,
.cpuid = x86_processor.cpuid,
};
+ clusters[package_index].core_count += 1;
packages[package_index].core_count += 1;
last_apic_core_id = apid_core_id;
} else {
@@ -336,14 +349,29 @@ void cpuinfo_x86_linux_init(void) {
}
if (apic_package_id != last_apic_package_id) {
- /* new package */
+ /* new cluster/package */
+
+ clusters[package_index] = (struct cpuinfo_cluster) {
+ .processor_start = processor_index,
+ .processor_count = 1,
+ .core_start = core_index,
+ .core_count = 1,
+ .cluster_id = 0,
+ .package = packages + package_index,
+ .vendor = x86_processor.vendor,
+ .uarch = x86_processor.uarch,
+ .cpuid = x86_processor.cpuid,
+ };
packages[package_index].processor_start = processor_index;
packages[package_index].processor_count = 1;
packages[package_index].core_start = core_index;
+ packages[package_index].cluster_start = package_index;
+ packages[package_index].cluster_count = 1;
cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[package_index].name);
last_apic_package_id = apic_package_id;
} else {
- /* another logical processor on the same package */
+ /* another logical processor on the same cluster/package */
+ clusters[package_index].processor_count++;
packages[package_index].processor_count++;
}
@@ -492,6 +520,7 @@ void cpuinfo_x86_linux_init(void) {
cpuinfo_processors = processors;
cpuinfo_cores = cores;
+ cpuinfo_clusters = clusters;
cpuinfo_packages = packages;
cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
@@ -501,6 +530,7 @@ void cpuinfo_x86_linux_init(void) {
cpuinfo_processors_count = processors_count;
cpuinfo_cores_count = cores_count;
+ cpuinfo_clusters_count = packages_count;
cpuinfo_packages_count = packages_count;
cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1i_count;
cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1d_count;
@@ -512,6 +542,7 @@ void cpuinfo_x86_linux_init(void) {
linux_cpu_to_core_map = NULL;
processors = NULL;
cores = NULL;
+ clusters = NULL;
packages = NULL;
l1i = l1d = l2 = l3 = l4 = NULL;
@@ -521,6 +552,7 @@ cleanup:
free(x86_linux_processors);
free(processors);
free(cores);
+ free(clusters);
free(packages);
free(l1i);
free(l1d);
diff --git a/src/x86/mach/init.c b/src/x86/mach/init.c
index a319d18..dc04663 100644
--- a/src/x86/mach/init.c
+++ b/src/x86/mach/init.c
@@ -20,6 +20,7 @@ static inline uint32_t bit_mask(uint32_t bits) {
void cpuinfo_x86_mach_init(void) {
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_core* cores = NULL;
+ struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_package* packages = NULL;
struct cpuinfo_cache* l1i = NULL;
struct cpuinfo_cache* l1d = NULL;
@@ -40,9 +41,16 @@ void cpuinfo_x86_mach_init(void) {
mach_topology.cores * sizeof(struct cpuinfo_core), mach_topology.cores);
goto cleanup;
}
+ /* On x86 cluster of cores is a physical package */
+ clusters = calloc(mach_topology.packages, sizeof(struct cpuinfo_cluster));
+ if (clusters == NULL) {
+ cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters",
+ mach_topology.packages * sizeof(struct cpuinfo_cluster), mach_topology.packages);
+ goto cleanup;
+ }
packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package));
if (packages == NULL) {
- cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" packages",
+ cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages",
mach_topology.packages * sizeof(struct cpuinfo_package), mach_topology.packages);
goto cleanup;
}
@@ -57,12 +65,23 @@ void cpuinfo_x86_mach_init(void) {
const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages;
const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages;
for (uint32_t i = 0; i < mach_topology.packages; i++) {
- packages[i] = (struct cpuinfo_package) {
+ clusters[i] = (struct cpuinfo_cluster) {
.processor_start = i * threads_per_package,
.processor_count = threads_per_package,
.core_start = i * cores_per_package,
.core_count = cores_per_package,
+ .cluster_id = 0,
+ .package = packages + i,
+ .vendor = x86_processor.vendor,
+ .uarch = x86_processor.uarch,
+ .cpuid = x86_processor.cpuid,
};
+ packages[i].processor_start = i * threads_per_package;
+ packages[i].processor_count = threads_per_package;
+ packages[i].core_start = i * cores_per_package;
+ packages[i].core_count = cores_per_package;
+ packages[i].cluster_start = i;
+ packages[i].cluster_count = 1;
cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[i].name);
}
for (uint32_t i = 0; i < mach_topology.cores; i++) {
@@ -70,6 +89,7 @@ void cpuinfo_x86_mach_init(void) {
.processor_start = i * threads_per_core,
.processor_count = threads_per_core,
.core_id = i % cores_per_package,
+ .cluster = clusters + i / cores_per_package,
.package = packages + i / cores_per_package,
.vendor = x86_processor.vendor,
.uarch = x86_processor.uarch,
@@ -95,6 +115,7 @@ void cpuinfo_x86_mach_init(void) {
processors[i].smt_id = smt_id;
processors[i].core = cores + i / threads_per_core;
+ processors[i].cluster = clusters + i / threads_per_package;
processors[i].package = packages + i / threads_per_package;
processors[i].apic_id = apic_id;
}
@@ -292,6 +313,7 @@ void cpuinfo_x86_mach_init(void) {
cpuinfo_processors = processors;
cpuinfo_cores = cores;
+ cpuinfo_clusters = clusters;
cpuinfo_packages = packages;
cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count;
@@ -302,16 +324,19 @@ void cpuinfo_x86_mach_init(void) {
cpuinfo_processors_count = mach_topology.threads;
cpuinfo_cores_count = mach_topology.cores;
+ cpuinfo_clusters_count = mach_topology.packages;
cpuinfo_packages_count = mach_topology.packages;
processors = NULL;
cores = NULL;
+ clusters = NULL;
packages = NULL;
l1i = l1d = l2 = l3 = l4 = NULL;
cleanup:
free(processors);
free(cores);
+ free(clusters);
free(packages);
free(l1i);
free(l1d);
diff --git a/src/x86/windows/init.c b/src/x86/windows/init.c
index 40bc25a..966cddb 100644
--- a/src/x86/windows/init.c
+++ b/src/x86/windows/init.c
@@ -91,6 +91,7 @@ static void cpuinfo_x86_count_caches(
BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_core* cores = NULL;
+ struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_package* packages = NULL;
struct cpuinfo_cache* l1i = NULL;
struct cpuinfo_cache* l1d = NULL;
@@ -284,6 +285,13 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
goto cleanup;
}
+ clusters = HeapAlloc(heap, HEAP_ZERO_MEMORY, packages_count * sizeof(struct cpuinfo_cluster));
+ if (clusters == NULL) {
+ cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters",
+ packages_count * sizeof(struct cpuinfo_cluster), packages_count);
+ goto cleanup;
+ }
+
packages = HeapAlloc(heap, HEAP_ZERO_MEMORY, packages_count * sizeof(struct cpuinfo_package));
if (packages == NULL) {
cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages",
@@ -299,6 +307,9 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
struct cpuinfo_core* core =
(struct cpuinfo_core*) ((uintptr_t) cores + (uintptr_t) processor->core);
processor->core = core;
+ struct cpuinfo_cluster* cluster =
+ (struct cpuinfo_cluster*) ((uintptr_t) clusters + (uintptr_t) processor->cluster);
+ processor->cluster = cluster;
struct cpuinfo_package* package =
(struct cpuinfo_package*) ((uintptr_t) packages + (uintptr_t) processor->package);
processor->package = package;
@@ -307,6 +318,10 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
package->processor_start = processor_id;
package->processor_count += 1;
+ /* This can be overwritten by lower-index processors on the same cluster */
+ cluster->processor_start = processor_id;
+ cluster->processor_count += 1;
+
/* This can be overwritten by lower-index processors on the same core*/
core->processor_start = processor_id;
core->processor_count += 1;
@@ -318,6 +333,7 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
struct cpuinfo_core* core = cores + global_core_id;
const struct cpuinfo_processor* processor = processors + core->processor_start;
struct cpuinfo_package* package = (struct cpuinfo_package*) processor->package;
+ struct cpuinfo_cluster* cluster = (struct cpuinfo_cluster*) processor->cluster;
core->package = package;
core->core_id = core_bits_mask &
@@ -326,12 +342,20 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
core->uarch = x86_processor.uarch;
core->cpuid = x86_processor.cpuid;
- /* This can be overwritten by lower-index cores on the same package */
+ /* This can be overwritten by lower-index cores on the same cluster/package */
+ cluster->core_start = global_core_id;
+ cluster->core_count += 1;
package->core_start = global_core_id;
package->core_count += 1;
}
for (uint32_t i = 0; i < packages_count; i++) {
+ clusters[i].package = packages + i;
+ clusters[i].vendor = cores[clusters[i].core_start].vendor;
+ clusters[i].uarch = cores[clusters[i].core_start].uarch;
+ clusters[i].cpuid = cores[clusters[i].core_start].cpuid;
+ packages[i].cluster_start = i;
+ packages[i].cluster_count = 1;
cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[i].name);
}
@@ -543,6 +567,7 @@ BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PV
processors = NULL;
cores = NULL;
+ clusters = NULL;
packages = NULL;
l1i = l1d = l2 = l3 = l4 = NULL;
@@ -553,6 +578,9 @@ cleanup:
if (cores != NULL) {
HeapFree(heap, 0, cores);
}
+ if (clusters != NULL) {
+ HeapFree(heap, 0, clusters);
+ }
if (packages != NULL) {
HeapFree(heap, 0, packages);
}
diff --git a/test/init.cc b/test/init.cc
index 763060b..0ab0127 100644
--- a/test/init.cc
+++ b/test/init.cc
@@ -28,6 +28,15 @@ TEST(PROCESSOR, valid_smt_id) {
}
}
+TEST(PROCESSOR, valid_core) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ const cpuinfo_processor* processor = cpuinfo_get_processor(i);
+ ASSERT_TRUE(processor);
+
+ EXPECT_TRUE(processor->core);
+ }
+}
+
TEST(PROCESSOR, consistent_core) {
for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
const cpuinfo_processor* processor = cpuinfo_get_processor(i);
@@ -40,6 +49,36 @@ TEST(PROCESSOR, consistent_core) {
}
}
+TEST(PROCESSOR, valid_cluster) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ const cpuinfo_processor* processor = cpuinfo_get_processor(i);
+ ASSERT_TRUE(processor);
+
+ EXPECT_TRUE(processor->cluster);
+ }
+}
+
+TEST(PROCESSOR, consistent_cluster) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ const cpuinfo_processor* processor = cpuinfo_get_processor(i);
+ ASSERT_TRUE(processor);
+ const cpuinfo_cluster* cluster = processor->cluster;
+ ASSERT_TRUE(cluster);
+
+ EXPECT_GE(i, cluster->processor_start);
+ EXPECT_LT(i, cluster->processor_start + cluster->processor_count);
+ }
+}
+
+TEST(PROCESSOR, valid_package) {
+ for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
+ const cpuinfo_processor* processor = cpuinfo_get_processor(i);
+ ASSERT_TRUE(processor);
+
+ EXPECT_TRUE(processor->package);
+ }
+}
+
TEST(PROCESSOR, consistent_package) {
for (uint32_t i = 0; i < cpuinfo_get_processors_count(); i++) {
const cpuinfo_processor* processor = cpuinfo_get_processor(i);
@@ -144,6 +183,8 @@ TEST(CORE, consistent_processors) {
for (uint32_t i = 0; i < core->processor_count; i++) {
const cpuinfo_processor* processor = cpuinfo_get_processor(core->processor_start + i);
+ ASSERT_TRUE(processor);
+
EXPECT_EQ(core, processor->core);
}
}
@@ -160,6 +201,36 @@ TEST(CORE, valid_core_id) {
}
}
+TEST(CORE, valid_cluster) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ const cpuinfo_core* core = cpuinfo_get_core(i);
+ ASSERT_TRUE(core);
+
+ EXPECT_TRUE(core->cluster);
+ }
+}
+
+TEST(CORE, consistent_cluster) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ const cpuinfo_core* core = cpuinfo_get_core(i);
+ ASSERT_TRUE(core);
+ const cpuinfo_cluster* cluster = core->cluster;
+ ASSERT_TRUE(cluster);
+
+ EXPECT_GE(i, cluster->core_start);
+ EXPECT_LT(i, cluster->core_start + cluster->core_count);
+ }
+}
+
+TEST(CORE, valid_package) {
+ for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+ const cpuinfo_core* core = cpuinfo_get_core(i);
+ ASSERT_TRUE(core);
+
+ EXPECT_TRUE(core->package);
+ }
+}
+
TEST(CORE, consistent_package) {
for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
const cpuinfo_core* core = cpuinfo_get_core(i);
@@ -190,6 +261,196 @@ TEST(CORE, known_uarch) {
}
}
+TEST(CLUSTERS_COUNT, within_bounds) {
+ EXPECT_NE(0, cpuinfo_get_clusters_count());
+ EXPECT_LE(cpuinfo_get_clusters_count(), cpuinfo_get_cores_count());
+ EXPECT_LE(cpuinfo_get_clusters_count(), cpuinfo_get_processors_count());
+ EXPECT_GE(cpuinfo_get_clusters_count(), cpuinfo_get_packages_count());
+}
+
+TEST(CLUSTERS, non_null) {
+ EXPECT_TRUE(cpuinfo_get_clusters());
+}
+
+TEST(CLUSTER, non_null) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ EXPECT_TRUE(cpuinfo_get_cluster(i));
+ }
+}
+
+TEST(CLUSTER, non_zero_processors) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ EXPECT_NE(0, cluster->processor_count);
+ }
+}
+
+TEST(CLUSTER, valid_processors) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ EXPECT_LT(cluster->processor_start, cpuinfo_get_processors_count());
+ EXPECT_LE(cluster->processor_start + cluster->processor_count, cpuinfo_get_processors_count());
+ }
+}
+
+TEST(CLUSTER, consistent_processors) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->processor_count; j++) {
+ const cpuinfo_processor* processor = cpuinfo_get_processor(cluster->processor_start + j);
+ EXPECT_EQ(cluster, processor->cluster);
+ }
+ }
+}
+
+TEST(CLUSTER, non_zero_cores) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ EXPECT_NE(0, cluster->core_count);
+ }
+}
+
+TEST(CLUSTER, valid_cores) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ EXPECT_LT(cluster->core_start, cpuinfo_get_cores_count());
+ EXPECT_LE(cluster->core_start + cluster->core_count, cpuinfo_get_cores_count());
+ }
+}
+
+TEST(CLUSTER, consistent_cores) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+ ASSERT_TRUE(core);
+
+ EXPECT_EQ(cluster, core->cluster);
+ }
+ }
+}
+
+TEST(CLUSTER, valid_cluster_id) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_package* package = cluster->package;
+ ASSERT_TRUE(package);
+
+ EXPECT_LT(cluster->cluster_id, package->cluster_count);
+ }
+ }
+}
+
+TEST(CLUSTER, valid_package) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ EXPECT_TRUE(cluster->package);
+ }
+}
+
+TEST(CLUSTER, consistent_package) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+ const cpuinfo_package* package = cluster->package;
+ ASSERT_TRUE(package);
+
+ EXPECT_GE(i, package->cluster_start);
+ EXPECT_LT(i, package->cluster_start + package->cluster_count);
+ }
+}
+
+TEST(CLUSTER, consistent_vendor) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+ ASSERT_TRUE(core);
+
+ EXPECT_EQ(cluster->vendor, core->vendor);
+ }
+ }
+}
+
+TEST(CLUSTER, consistent_uarch) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+ ASSERT_TRUE(core);
+
+ EXPECT_EQ(cluster->uarch, core->uarch);
+ }
+ }
+}
+
+#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
+TEST(CLUSTER, consistent_cpuid) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+ ASSERT_TRUE(core);
+
+ EXPECT_EQ(cluster->cpuid, core->cpuid);
+ }
+ }
+}
+#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */
+
+#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+TEST(CLUSTER, consistent_midr) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+ ASSERT_TRUE(core);
+
+ EXPECT_EQ(cluster->midr, core->midr);
+ }
+ }
+}
+#endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */
+
+TEST(CLUSTER, consistent_frequency) {
+ for (uint32_t i = 0; i < cpuinfo_get_clusters_count(); i++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(i);
+ ASSERT_TRUE(cluster);
+
+ for (uint32_t j = 0; j < cluster->core_count; j++) {
+ const cpuinfo_core* core = cpuinfo_get_core(cluster->core_start + j);
+ ASSERT_TRUE(core);
+
+ EXPECT_EQ(cluster->frequency, core->frequency);
+ }
+ }
+}
+
TEST(PACKAGES_COUNT, within_bounds) {
EXPECT_NE(0, cpuinfo_get_packages_count());
EXPECT_LE(cpuinfo_get_packages_count(), cpuinfo_get_cores_count());
@@ -232,6 +493,8 @@ TEST(PACKAGE, consistent_processors) {
for (uint32_t j = 0; j < package->processor_count; j++) {
const cpuinfo_processor* processor = cpuinfo_get_processor(package->processor_start + j);
+ ASSERT_TRUE(processor);
+
EXPECT_EQ(package, processor->package);
}
}
@@ -242,7 +505,7 @@ TEST(PACKAGE, non_zero_cores) {
const cpuinfo_package* package = cpuinfo_get_package(i);
ASSERT_TRUE(package);
- EXPECT_NE(0, package->processor_count);
+ EXPECT_NE(0, package->core_count);
}
}
@@ -263,11 +526,46 @@ TEST(PACKAGE, consistent_cores) {
for (uint32_t j = 0; j < package->core_count; j++) {
const cpuinfo_core* core = cpuinfo_get_core(package->core_start + j);
+ ASSERT_TRUE(core);
+
EXPECT_EQ(package, core->package);
}
}
}
+TEST(PACKAGE, non_zero_clusters) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ const cpuinfo_package* package = cpuinfo_get_package(i);
+ ASSERT_TRUE(package);
+
+ EXPECT_NE(0, package->cluster_count);
+ }
+}
+
+TEST(PACKAGE, valid_clusters) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ const cpuinfo_package* package = cpuinfo_get_package(i);
+ ASSERT_TRUE(package);
+
+ EXPECT_LT(package->cluster_start, cpuinfo_get_clusters_count());
+ EXPECT_LE(package->cluster_start + package->cluster_count, cpuinfo_get_clusters_count());
+ }
+}
+
+TEST(PACKAGE, consistent_cluster) {
+ for (uint32_t i = 0; i < cpuinfo_get_packages_count(); i++) {
+ const cpuinfo_package* package = cpuinfo_get_package(i);
+ ASSERT_TRUE(package);
+
+ for (uint32_t j = 0; j < package->cluster_count; j++) {
+ const cpuinfo_cluster* cluster = cpuinfo_get_cluster(package->cluster_start + j);
+ ASSERT_TRUE(cluster);
+
+ EXPECT_EQ(package, cluster->package);
+ }
+ }
+}
+
TEST(L1I_CACHES_COUNT, within_bounds) {
EXPECT_NE(0, cpuinfo_get_l1i_caches_count());
EXPECT_LE(cpuinfo_get_l1i_caches_count(), cpuinfo_get_processors_count());