aboutsummaryrefslogtreecommitdiff
path: root/src/arm/linux
diff options
context:
space:
mode:
authorMarat Dukhan <marat@fb.com>2017-09-11 10:23:21 -0700
committerMarat Dukhan <marat@fb.com>2017-09-11 10:23:21 -0700
commit9403e5af92415e28dd84deb9faf3308bc3bac276 (patch)
tree2e821ae48e5ecd8ca8c461490cf1a3a138213f8b /src/arm/linux
parent97d76ca20a5f5b40ae3b36521aea57dd50973354 (diff)
downloadcpuinfo-9403e5af92415e28dd84deb9faf3308bc3bac276.tar.gz
Correctly detect big.LITTLE and Max.Med.Min clusters
Diffstat (limited to 'src/arm/linux')
-rw-r--r--src/arm/linux/api.h45
-rw-r--r--src/arm/linux/clusters.c493
-rw-r--r--src/arm/linux/init.c335
-rw-r--r--src/arm/linux/midr.c762
4 files changed, 1374 insertions, 261 deletions
diff --git a/src/arm/linux/api.h b/src/arm/linux/api.h
index 53b8f38..ee42c1d 100644
--- a/src/arm/linux/api.h
+++ b/src/arm/linux/api.h
@@ -5,6 +5,7 @@
#include <cpuinfo.h>
#include <arm/midr.h>
+#include <arm/api.h>
#include <linux/api.h>
/* No hard limit in the kernel, maximum length observed on non-rogue kernels is 64 */
@@ -123,34 +124,11 @@ struct cpuinfo_arm_linux_processor {
enum cpuinfo_vendor vendor;
enum cpuinfo_uarch uarch;
/**
- * ID of the core which includes this logical processor.
- * The value is parsed from /sys/devices/system/cpu/cpu<N>/topology/core_id
- */
- uint32_t core_id;
- /**
- * Maximum processor ID on the core which includes this logical processor.
- * This value can serve as an ID for the cluster of logical processors: it is the
- * same for all logical processors on the same core.
- */
- uint32_t core_group_max;
- /**
- * Minimum processor ID on the core which includes this logical processor.
- * This value can serve as an ID for the cluster of logical processors: it is the
- * same for all logical processors on the same core.
- */
- uint32_t core_group_min;
- /**
* ID of the physical package which includes this logical processor.
* The value is parsed from /sys/devices/system/cpu/cpu<N>/topology/physical_package_id
*/
uint32_t package_id;
/**
- * Maximum processor ID on the package which includes this logical processor.
- * This value can serve as an ID for the cluster of logical processors: it is the
- * same for all logical processors on the same package.
- */
- uint32_t package_group_max;
- /**
* Minimum processor ID on the package which includes this logical processor.
* This value can serve as an ID for the cluster of logical processors: it is the
* same for all logical processors on the same package.
@@ -270,3 +248,24 @@ bool cpuinfo_arm_linux_parse_proc_cpuinfo(
const struct cpuinfo_arm_linux_processor processors[restrict static 1],
struct cpuinfo_arm_isa isa[restrict static 1]);
#endif
+
+bool cpuinfo_arm_linux_detect_core_clusters_by_heuristic(
+ uint32_t usable_processors,
+ uint32_t max_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
+
+void cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(
+ uint32_t max_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
+
+void cpuinfo_arm_linux_count_cluster_processors(
+ uint32_t max_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
+
+uint32_t cpuinfo_arm_linux_detect_cluster_midr(
+#if defined(__ANDROID__)
+ const struct cpuinfo_arm_chipset chipset[restrict static 1],
+#endif
+ uint32_t max_processors,
+ uint32_t usable_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
diff --git a/src/arm/linux/clusters.c b/src/arm/linux/clusters.c
new file mode 100644
index 0000000..4c0015c
--- /dev/null
+++ b/src/arm/linux/clusters.c
@@ -0,0 +1,493 @@
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cpuinfo.h>
+#include <arm/linux/api.h>
+#if defined(__ANDROID__)
+ #include <arm/android/api.h>
+#endif
+#include <arm/api.h>
+#include <arm/midr.h>
+#include <linux/api.h>
+#include <api.h>
+#include <log.h>
+
+static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
+ return (bitfield & mask) == mask;
+}
+
+/*
+ * Assigns logical processors to clusters of cores using heuristic based on the typical configuration of clusters for
+ * 5, 6, 8, and 10 cores:
+ * - 5 cores (ARM32 Android only): 2 clusters of 4+1 cores
+ * - 6 cores: 2 clusters of 4+2 cores
+ * - 8 cores: 2 clusters of 4+4 cores
+ * - 10 cores: 3 clusters of 4+4+2 cores
+ *
+ * The function must be called after parsing OS-provided information on core clusters.
+ * Its purpose is to detect clusters of cores when OS-provided information is lacking or incomplete, i.e.
+ * - Linux kernel is not configured to report information in sysfs topology leaf.
+ * - Linux kernel reports topology information only for online cores, and only cores on one cluster are online, e.g.:
+ * - Exynos 8890 has 8 cores in 4+4 clusters, but only the first cluster of 4 cores is reported, and cluster
+ * configuration of logical processors 4-7 is not reported (all remaining processors 4-7 form cluster 1)
+ * - MT6797 has 10 cores in 4+4+2, but only the first cluster of 4 cores is reported, and cluster configuration
+ * of logical processors 4-9 is not reported (processors 4-7 form cluster 1, and processors 8-9 form cluster 2).
+ *
+ * Heuristic assignment of processors to the above pre-defined clusters fails if such assignment would contradict
+ * information provided by the operating system:
+ * - Any of the OS-reported processor clusters is different than the corresponding heuristic cluster.
+ * - Processors in a heuristic cluster have no OS-provided cluster siblings information, but have known and different
+ * minimum/maximum frequency.
+ * - Processors in a heuristic cluster have no OS-provided cluster siblings information, but have known and different
+ * MIDR components.
+ *
+ * If the heuristic assignment of processors to clusters of cores fails, all processors' clusters are unchanged.
+ *
+ * @param usable_processors - number of processors in the @p processors array with CPUINFO_LINUX_MASK_USABLE flags.
+ * @param max_processors - number of elements in the @p processors array.
+ * @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE and PRESENT flags, minimum/maximum
+ * frequency, MIDR information, and core cluster (package siblings list) information.
+ *
+ * @retval true if the heuristic successfully assigned all processors into clusters of cores.
+ * @retval false if known details about processors contradict the heuristic configuration of core clusters.
+ */
+bool cpuinfo_arm_linux_detect_core_clusters_by_heuristic(
+ uint32_t usable_processors,
+ uint32_t max_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors])
+{
+ uint32_t cluster_processors[3];
+ switch (usable_processors) {
+ case 10:
+ cluster_processors[0] = 4;
+ cluster_processors[1] = 4;
+ cluster_processors[2] = 2;
+ break;
+ case 8:
+ cluster_processors[0] = 4;
+ cluster_processors[1] = 4;
+ break;
+ case 6:
+ cluster_processors[0] = 4;
+ cluster_processors[1] = 2;
+ break;
+#if defined(__ANDROID__) && CPUINFO_ARCH_ARM
+ case 5:
+ /*
+ * The only processor with 5 cores is Leadcore L1860C (ARMv7, mobile),
+ * but this configuration is not too unreasonable for a virtualized ARM server.
+ */
+ cluster_processors[0] = 4;
+ cluster_processors[1] = 1;
+ break;
+#endif
+ default:
+ return false;
+ }
+
+ /*
+ * Assignment of processors to core clusters is done in two passes:
+ * 1. Verify that the clusters proposed by heuristic are compatible with known details about processors.
+ * 2. If verification passed, update core clusters for the processors.
+ */
+
+ uint32_t cluster = 0;
+ uint32_t expected_cluster_processors = 0;
+ uint32_t cluster_start, cluster_flags, cluster_midr, cluster_max_frequency, cluster_min_frequency;
+ bool expected_cluster_exists;
+ for (uint32_t i = 0; i < max_processors; i++) {
+ if (bitmask_all(processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ if (expected_cluster_processors == 0) {
+ /* Expect this processor to start a new cluster */
+
+ expected_cluster_exists = !!(processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER);
+ if (expected_cluster_exists) {
+ if (processors[i].package_group_min != i) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "processor %"PRIu32" is expected to start a new cluster #%"PRIu32" with %"PRIu32" cores, "
+ "but system siblings lists reported it as a sibling of processor %"PRIu32,
+ i, cluster, cluster_processors[cluster], processors[i].package_group_min);
+ return false;
+ }
+ } else {
+ cluster_flags = 0;
+ }
+
+ cluster_start = i;
+ expected_cluster_processors = cluster_processors[cluster++];
+ } else {
+ /* Expect this processor to belong to the same cluster as the processor that started the current cluster */
+
+ if (expected_cluster_exists) {
+ /*
+ * The cluster suggested by the heuristic was already parsed from system siblings lists.
+ * For all processors we expect in the cluster, check that:
+ * - They have pre-assigned cluster from siblings lists (CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER flag).
+ * - They were assigned to the same cluster based on siblings lists
+ * (package_group_min points to the first processor in the cluster).
+ */
+
+ if ((processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) == 0) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "processor %"PRIu32" is expected to belong to the cluster of processor %"PRIu32", "
+ "but system siblings lists did not report it as a sibling of processor %"PRIu32,
+ i, cluster_start, cluster_start);
+ return false;
+ }
+ if (processors[i].package_group_min != cluster_start) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "processor %"PRIu32" is expected to belong to the cluster of processor %"PRIu32", "
+ "but system siblings lists reported it to belong to the cluster of processor %"PRIu32,
+ i, cluster_start, processors[i].package_group_min);
+ return false;
+ }
+ } else {
+ /*
+ * The cluster suggested by the heuristic was not parsed from system siblings lists.
+ * For all processors we expect in the cluster, check that:
+ * - They have no pre-assigned cluster from siblings lists.
+ * - If their min/max CPU frequency is known, it is the same.
+ * - If any part of their MIDR (Implementer, Variant, Part, Revision) is known, it is the same.
+ */
+
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "processor %"PRIu32" is expected to be unassigned to any cluster, "
+ "but system siblings lists reported it to belong to the cluster of processor %"PRIu32,
+ i, processors[i].package_group_min);
+ return false;
+ }
+
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
+ if (cluster_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
+ if (cluster_min_frequency != processors[i].min_frequency) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "minimum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of its expected cluster (%"PRIu32" KHz)",
+ i, processors[i].min_frequency, cluster_min_frequency);
+ return false;
+ }
+ } else {
+ cluster_min_frequency = processors[i].min_frequency;
+ cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+ if (cluster_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+ if (cluster_max_frequency != processors[i].max_frequency) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "maximum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of its expected cluster (%"PRIu32" KHz)",
+ i, processors[i].max_frequency, cluster_max_frequency);
+ return false;
+ }
+ } else {
+ cluster_max_frequency = processors[i].max_frequency;
+ cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK)) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "CPU Implementer of processor %"PRIu32" (0x%02"PRIx32") is different than of its expected cluster (0x%02"PRIx32")",
+ i, midr_get_implementer(processors[i].midr), midr_get_implementer(cluster_midr));
+ return false;
+ }
+ } else {
+ cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_VARIANT_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_VARIANT_MASK)) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "CPU Variant of processor %"PRIu32" (0x%"PRIx32") is different than of its expected cluster (0x%"PRIx32")",
+ i, midr_get_variant(processors[i].midr), midr_get_variant(cluster_midr));
+ return false;
+ }
+ } else {
+ cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_PART) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_PART_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_PART_MASK)) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "CPU Part of processor %"PRIu32" (0x%03"PRIx32") is different than of its expected cluster (0x%03"PRIx32")",
+ i, midr_get_part(processors[i].midr), midr_get_part(cluster_midr));
+ return false;
+ }
+ } else {
+ cluster_midr = midr_copy_part(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_REVISION_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_REVISION_MASK)) {
+ cpuinfo_log_debug(
+ "heuristic detection of core clusters failed: "
+ "CPU Revision of processor %"PRIu32" (0x%"PRIx32") is different than of its expected cluster (0x%"PRIx32")",
+ i, midr_get_revision(processors[i].midr), midr_get_revision(cluster_midr));
+ return false;
+ }
+ } else {
+ cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION;
+ }
+ }
+ }
+ }
+ expected_cluster_processors--;
+ }
+ }
+
+ /* Verification passed, assign all processors to new clusters */
+ cluster = 0;
+ expected_cluster_processors = 0;
+ for (uint32_t i = 0; i < max_processors; i++) {
+ if (bitmask_all(processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ if (expected_cluster_processors == 0) {
+ /* Expect this processor to start a new cluster */
+
+ cluster_start = i;
+ expected_cluster_processors = cluster_processors[cluster++];
+ } else {
+ /* Expect this processor to belong to the same cluster as the processor that started the current cluster */
+
+ if (!(processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
+ cpuinfo_log_debug("assigned processor %"PRIu32" to cluster of processor %"PRIu32" based on heuristic",
+ i, cluster_start);
+ }
+
+ processors[i].package_group_min = cluster_start;
+ processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
+ }
+ expected_cluster_processors--;
+ }
+ }
+ return true;
+}
+
+/*
+ * Assigns logical processors to clusters of cores in sequential manner:
+ * - Clusters detected from OS-provided information are unchanged:
+ * - Processors assigned to these clusters stay assigned to the same clusters
+ * - No new processors are added to these clusters
+ * - Processors without pre-assigned cluster are clustered in one sequential scan:
+ * - If known details (min/max frequency, MIDR components) of a processor are compatible with a preceding
+ * processor, without pre-assigned cluster, the processor is assigned to the cluster of the preceding processor.
+ * - If known details (min/max frequency, MIDR components) of a processor are not compatible with a preceding
+ * processor, the processor is assigned to a newly created cluster.
+ *
+ * The function must be called after parsing OS-provided information on core clusters, and usually is called only
+ * if heuristic assignment of processors to clusters (cpuinfo_arm_linux_detect_core_clusters_by_heuristic) failed.
+ *
+ * Its purpose is to detect clusters of cores when OS-provided information is lacking or incomplete, i.e.
+ * - Linux kernel is not configured to report information in sysfs topology leaf.
+ * - Linux kernel reports topology information only for online cores, and all cores on some of the clusters are offline.
+ *
+ * Sequential assignment of processors to clusters always succeeds, and upon exit, all usable processors in the
+ * @p processors array have cluster information.
+ *
+ * @param max_processors - number of elements in the @p processors array.
+ * @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE and PRESENT flags, minimum/maximum
+ * frequency, MIDR information, and core cluster (package siblings list) information.
+ *
+ * This function cannot fail: upon return, all usable processors are assigned to clusters of cores.
+ */
+void cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(
+ uint32_t max_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors])
+{
+ uint32_t cluster_flags = 0;
+ uint32_t cluster_processors = 0;
+ uint32_t cluster_start, cluster_midr, cluster_max_frequency, cluster_min_frequency;
+ for (uint32_t i = 0; i < max_processors; i++) {
+ if ((processors[i].flags & (CPUINFO_LINUX_MASK_USABLE | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) == CPUINFO_LINUX_MASK_USABLE) {
+ if (cluster_processors == 0) {
+ goto new_cluster;
+ }
+
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
+ if (cluster_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
+ if (cluster_min_frequency != processors[i].min_frequency) {
+ cpuinfo_log_info(
+ "minimum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of preceding cluster (%"PRIu32" KHz); "
+ "processor %"PRIu32" starts a new cluster",
+ i, processors[i].min_frequency, cluster_min_frequency, i);
+ goto new_cluster;
+ }
+ } else {
+ cluster_min_frequency = processors[i].min_frequency;
+ cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+ if (cluster_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+ if (cluster_max_frequency != processors[i].max_frequency) {
+ cpuinfo_log_debug(
+ "maximum frequency of processor %"PRIu32" (%"PRIu32" KHz) is different than of preceding cluster (%"PRIu32" KHz); "
+ "processor %"PRIu32" starts a new cluster",
+ i, processors[i].max_frequency, cluster_max_frequency, i);
+ goto new_cluster;
+ }
+ } else {
+ cluster_max_frequency = processors[i].max_frequency;
+ cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK)) {
+ cpuinfo_log_debug(
+ "CPU Implementer of processor %"PRIu32" (0x%02"PRIx32") is different than of preceding cluster (0x%02"PRIx32"); "
+ "processor %"PRIu32" starts a new cluster",
+ i, midr_get_implementer(processors[i].midr), midr_get_implementer(cluster_midr), i);
+ goto new_cluster;
+ }
+ } else {
+ cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_VARIANT_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_VARIANT_MASK)) {
+ cpuinfo_log_debug(
+ "CPU Variant of processor %"PRIu32" (0x%"PRIx32") is different than of preceding cluster (0x%"PRIx32"); "
+ "processor %"PRIu32" starts a new cluster",
+ i, midr_get_variant(processors[i].midr), midr_get_variant(cluster_midr), i);
+ goto new_cluster;
+ }
+ } else {
+ cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_PART) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_PART_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_PART_MASK)) {
+ cpuinfo_log_debug(
+ "CPU Part of processor %"PRIu32" (0x%03"PRIx32") is different than of preceding cluster (0x%03"PRIx32"); "
+ "processor %"PRIu32" starts a new cluster",
+ i, midr_get_part(processors[i].midr), midr_get_part(cluster_midr), i);
+ goto new_cluster;
+ }
+ } else {
+ cluster_midr = midr_copy_part(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART;
+ }
+ }
+
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+ if (cluster_flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+ if ((cluster_midr & CPUINFO_ARM_MIDR_REVISION_MASK) != (processors[i].midr & CPUINFO_ARM_MIDR_REVISION_MASK)) {
+ cpuinfo_log_debug(
+ "CPU Revision of processor %"PRIu32" (0x%"PRIx32") is different than of preceding cluster (0x%"PRIx32"); "
+ "processor %"PRIu32" starts a new cluster",
+ i, midr_get_revision(processors[i].midr), midr_get_revision(cluster_midr), i);
+ goto new_cluster;
+ }
+ } else {
+ cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION;
+ }
+ }
+
+ /* All checks passed, attach processor to the preceding cluster */
+ cluster_processors++;
+ processors[i].package_group_min = cluster_start;
+ processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
+ cpuinfo_log_debug("assigned processor %"PRIu32" to preceding cluster of processor %"PRIu32, i, cluster_start);
+ continue;
+
+new_cluster:
+ /* Create a new cluster starting with processor i */
+ cluster_start = i;
+ processors[i].package_group_min = i;
+ processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
+ cluster_processors = 1;
+
+ /* Copy known information from processor to cluster, and set the flags accordingly */
+ cluster_flags = 0;
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
+ cluster_min_frequency = processors[i].min_frequency;
+ cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
+ }
+ if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+ cluster_max_frequency = processors[i].max_frequency;
+ cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
+ }
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+ cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER;
+ }
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+ cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT;
+ }
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) {
+ cluster_midr = midr_copy_part(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART;
+ }
+ if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+ cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr);
+ cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION;
+ }
+ }
+ }
+}
+
+/*
+ * Counts the number of logical processors in each core cluster.
+ * This function should be called after all processors are assigned to core clusters.
+ *
+ * @param max_processors - number of elements in the @p processors array.
+ * @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE and PRESENT flags,
+ * and decoded core cluster (package_group_min) information.
+ * The function expects the value of processors[i].package_processor_count to be zero.
+ * Upon return, processors[i].package_processor_count will contain the number of logical
+ * processors in the respective core cluster.
+ */
+void cpuinfo_arm_linux_count_cluster_processors(
+ uint32_t max_processors,
+ struct cpuinfo_arm_linux_processor processors[restrict static max_processors])
+{
+ /* First pass: accumulate the number of processors at the group leader's package_processor_count */
+ for (uint32_t i = 0; i < max_processors; i++) {
+ if (bitmask_all(processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ const uint32_t package_group_min = processors[i].package_group_min;
+ processors[package_group_min].package_processor_count += 1;
+ }
+ }
+ /* Second pass: copy the package_processor_count from the group leader processor */
+ for (uint32_t i = 0; i < max_processors; i++) {
+ if (bitmask_all(processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ const uint32_t package_group_min = processors[i].package_group_min;
+ processors[i].package_processor_count = processors[package_group_min].package_processor_count;
+ }
+ }
+}
diff --git a/src/arm/linux/init.c b/src/arm/linux/init.c
index f4ad85e..abf2b17 100644
--- a/src/arm/linux/init.c
+++ b/src/arm/linux/init.c
@@ -33,6 +33,34 @@ static inline int cmp(uint32_t a, uint32_t b) {
return (a > b) - (a < b);
}
+static bool cluster_siblings_parser(
+ uint32_t processor, uint32_t siblings_start, uint32_t siblings_end,
+ struct cpuinfo_arm_linux_processor* processors)
+{
+ processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
+ uint32_t package_group_min = processors[processor].package_group_min;
+
+ for (uint32_t sibling = siblings_start; sibling < siblings_end; sibling++) {
+ if (!bitmask_all(processors[sibling].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ cpuinfo_log_info("invalid processor %"PRIu32" reported as a sibling for processor %"PRIu32,
+ sibling, processor);
+ continue;
+ }
+
+ const uint32_t sibling_package_group_min = processors[sibling].package_group_min;
+ if (sibling_package_group_min < package_group_min) {
+ package_group_min = sibling_package_group_min;
+ }
+
+ processors[sibling].package_group_min = package_group_min;
+ processors[sibling].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
+ }
+
+ processors[processor].package_group_min = package_group_min;
+
+ return true;
+}
+
static int cmp_x86_processor_by_apic_id(const void* ptr_a, const void* ptr_b) {
const struct cpuinfo_arm_linux_processor* processor_a = (const struct cpuinfo_arm_linux_processor*) ptr_a;
const struct cpuinfo_arm_linux_processor* processor_b = (const struct cpuinfo_arm_linux_processor*) ptr_b;
@@ -48,10 +76,10 @@ static int cmp_x86_processor_by_apic_id(const void* ptr_a, const void* ptr_b) {
const uint32_t midr_a = processor_a->midr;
const uint32_t midr_b = processor_b->midr;
if (midr_a != midr_b) {
- if (midr_is_big_core(midr_a) || midr_is_little_core(midr_b)) {
- return -1;
- } else if (midr_is_big_core(midr_b) || midr_is_little_core(midr_a)) {
- return 1;
+ const uint32_t score_a = midr_score_core(midr_a);
+ const uint32_t score_b = midr_score_core(midr_b);
+ if (score_a != score_b) {
+ return score_a > score_b ? -1 : 1;
}
}
@@ -124,6 +152,13 @@ void cpuinfo_arm_linux_init(void) {
return;
}
+ for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
+ if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ cpuinfo_log_debug("parsed processor %"PRIu32" MIDR 0x%08"PRIx32,
+ i, arm_linux_processors[i].midr);
+ }
+ }
+
uint32_t usable_processors = 0;
uint32_t known_processors = 0;
uint32_t last_reported_processor = 0;
@@ -172,7 +207,7 @@ void cpuinfo_arm_linux_init(void) {
}
}
- /* Detect min/max frequency, core ID, and package ID */
+ /* Detect min/max frequency and package ID */
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
const uint32_t max_frequency = cpuinfo_linux_get_processor_max_frequency(i);
@@ -195,8 +230,7 @@ void cpuinfo_arm_linux_init(void) {
/* Initialize topology group IDs */
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- arm_linux_processors[i].core_group_min = arm_linux_processors[i].core_group_max = i;
- arm_linux_processors[i].package_group_min = arm_linux_processors[i].package_group_max = i;
+ arm_linux_processors[i].package_group_min = i;
}
/* Propagate topology group IDs among siblings */
@@ -205,259 +239,89 @@ void cpuinfo_arm_linux_init(void) {
continue;
}
- if ((arm_linux_processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_ID) == 0) {
- continue;
- }
-
- cpuinfo_linux_detect_core_siblings(
- arm_linux_processors_count,
- i,
- &arm_linux_processors->flags,
- &arm_linux_processors->package_id,
- &arm_linux_processors->package_group_min,
- &arm_linux_processors->package_group_max,
- sizeof(struct cpuinfo_arm_linux_processor));
- }
-
- /*
- * Topology information about some or all logical processors may be unavailable, for the following reasons:
- * - Linux kernel is too old, or configured without support for topology information in sysfs.
- * - Core is offline, and Linux kernel is configured to not report topology for offline cores.
- *
- * In these cases, we use a fall-back mechanism for topology detection, based on the assumption that equivalent
- * cores belong to the same cluster:
- * - Cores with the same min/max frequency and microarchitecture are assumed to belong to the same cluster.
- * - If min or max frequency is not known for any of the cores, but microarchitecture for both cores is the same,
- * and different from Cortex-A53, both cores are assumed to belong to the same cluster. Cortex-A53 is the only
- * microarchitecture, which is simultaneously used in multiple clusters in the same SoCs, e.g. Qualcomm
- * Snapdragon 615 combines 4 "big" Cortex-A53 cores + 4 "LITTLE" Cortex-A53 cores, and MediaTek Helio X20
- * combines 2 "max" Cortex-A72 cores + 4 "med" Cortex-A53 cores + 4 "min" Cortex-A53 cores.
- * - If microarchitecture is not known, but min/max frequency are the same for two cores, assume both cores
- * belong to the same cluster.
- */
- for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
- }
-
if (arm_linux_processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_ID) {
- continue;
- }
-
- for (uint32_t j = 0; j < arm_linux_processors_count; j++) {
- if (i == j) {
- continue;
- }
-
- if (!bitmask_all(arm_linux_processors[j].flags, CPUINFO_LINUX_MASK_USABLE)) {
- /* Logical processor is not possible or not present */
- continue;
- }
-
- if (arm_linux_processors[j].flags & CPUINFO_LINUX_FLAG_PACKAGE_ID) {
- /* Cluster for this processor was already parsed from sysfs */
- continue;
- }
-
- if (cpuinfo_arm_linux_processor_equals(&arm_linux_processors[i], &arm_linux_processors[j])) {
- cpuinfo_log_info(
- "processors %"PRIu32" and %"PRIu32" are assigned to the same cluster based on similarity", i, j);
-
- arm_linux_processors[i].package_group_min = arm_linux_processors[j].package_group_min =
- min(arm_linux_processors[i].package_group_min, arm_linux_processors[j].package_group_min);
- arm_linux_processors[i].package_group_max = arm_linux_processors[j].package_group_max =
- max(arm_linux_processors[i].package_group_max, arm_linux_processors[j].package_group_max);
- arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
- arm_linux_processors[j].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
- }
+ cpuinfo_linux_detect_core_siblings(
+ arm_linux_processors_count, i,
+ (cpuinfo_siblings_callback) cluster_siblings_parser,
+ arm_linux_processors);
}
}
- /*
- * It may happen that neither of sysfs topology information, min/max frequencies, or microarchitecture
- * is known for some or all cores. This can happen for the following reasons:
- * - Kernel is configured without support for sysfs cpufreq and topology information, and reports
- * detailed information only for one of the cores listed in /proc/cpuinfo
- * - Some of the cores are offline, and Linux kernel is configured to report information only about
- * online cores.
- *
- * In this case, it is generally impossible to reconstruct topology information, and we use a heuristic:
- * each core which wasn't assigned to any cluster yet, is assumed to belong to the same cluster as
- * the preceeding core for which no sysfs information is available.
- */
- uint32_t cluster_processor_id = 0;
- bool last_processor_has_sysfs_topology = false;
+ /* Propagate all cluster IDs */
+ uint32_t clustered_processors = 0;
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
- }
+ if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
+ clustered_processors += 1;
- if (arm_linux_processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_ID) {
- /* sysfs topology information is available for this processor */
- last_processor_has_sysfs_topology = true;
- } else {
- if (last_processor_has_sysfs_topology) {
- /*
- * Subsequent processors unassigned to any cluster will be added to the cluster of this
- * processor. Note that if this processor itself is not assigned to any cluster,
- * it will start a new cluster of processors.
- */
- cluster_processor_id = i;
+ const uint32_t package_group_min = arm_linux_processors[i].package_group_min;
+ if (package_group_min < i) {
+ arm_linux_processors[i].package_group_min = arm_linux_processors[package_group_min].package_group_min;
}
- last_processor_has_sysfs_topology = false;
- }
- if (!(arm_linux_processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
- // TODO: check that processors are not the same
- if (cluster_processor_id == i) {
- cpuinfo_log_info("processor %"PRIu32" is assumed to belong to a new cluster", i);
- } else {
- cpuinfo_log_info("processor %"PRIu32" is assumed to belong to the cluster of processor %"PRIu32,
- i, cluster_processor_id);
- arm_linux_processors[i].package_group_min = arm_linux_processors[cluster_processor_id].package_group_min;
- arm_linux_processors[cluster_processor_id].package_group_max =
- arm_linux_processors[i].package_group_max =
- max(i, arm_linux_processors[cluster_processor_id].package_group_max);
- }
- arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
+ cpuinfo_log_debug("processor %"PRIu32" clustered with processor %"PRIu32" as inferred from system siblings lists",
+ i, arm_linux_processors[i].package_group_min);
}
}
- /*
- * Run Shiloach-Vishkin (well, almost) connected components algorithm
- */
- uint32_t update;
- do {
- update = 0;
- for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
- }
-
- const uint32_t group_max_processor_id = arm_linux_processors[i].package_group_max;
- const uint32_t group_min_processor_id = arm_linux_processors[i].package_group_min;
-
- const uint32_t group_max_processor_group_max = arm_linux_processors[group_max_processor_id].package_group_max;
- const uint32_t group_max_processor_group_min = arm_linux_processors[group_max_processor_id].package_group_min;
- const uint32_t group_min_processor_group_max = arm_linux_processors[group_min_processor_id].package_group_max;
- const uint32_t group_min_processor_group_min = arm_linux_processors[group_min_processor_id].package_group_min;
-
- const uint32_t new_group_max_processor_id = max(group_max_processor_group_max, group_min_processor_group_max);
- const uint32_t new_group_min_processor_id = min(group_min_processor_group_min, group_max_processor_group_min);
-
- arm_linux_processors[i].package_group_max =
- arm_linux_processors[group_max_processor_id].package_group_max =
- arm_linux_processors[group_min_processor_id].package_group_max =
- new_group_max_processor_id;
- arm_linux_processors[i].package_group_min =
- arm_linux_processors[group_max_processor_id].package_group_min =
- arm_linux_processors[group_min_processor_id].package_group_min =
- new_group_min_processor_id;
-
- update |= (group_max_processor_id ^ new_group_max_processor_id) | (group_min_processor_id ^ new_group_min_processor_id) |
- (group_max_processor_group_max ^ new_group_max_processor_id) | (group_max_processor_group_min ^ new_group_min_processor_id) |
- (group_min_processor_group_max ^ new_group_max_processor_id) | (group_min_processor_group_min ^ new_group_min_processor_id);
- }
- } while (update != 0);
-
- uint32_t cluster_count = 0;
- for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
- }
-
- if (arm_linux_processors[i].package_group_min == i) {
- cluster_count += 1;
- }
- }
- cpuinfo_log_info("detected %"PRIu32" core clusters", cluster_count);
-
- /*
- * Two relations between reported /proc/cpuinfo information, and cores is possible:
- * - /proc/cpuinfo reports information for all or some of the cores below the corresponding
- * "processor : <number>" lines. Information on offline cores may be missing.
- * - /proc/cpuinfo reports information only once, after all "processor : <number>" lines.
- * The reported information may relate to processor #0 or to the processor which
- * executed the system calls to read /proc/cpuinfo. It is also indistinguishable
- * from /proc/cpuinfo reporting information only for the last core (e.g. if all other
- * cores are offline).
- *
- * We detect the second case by checking if /proc/cpuinfo contains valid MIDR only for one,
- * last reported, processor. Note, that the last reported core may be not the last
- * present+possible processor, as /proc/cpuinfo may not report high-index offline cores.
- */
- if (usable_processors != 1 && known_processors == 1 && last_reported_processor == last_reported_midr && cluster_count > 1) {
- cpuinfo_log_error("not sufficient per-cluster information");
- } else {
+ if (clustered_processors != usable_processors) {
/*
- * Propagate MIDR, vendor, and microarchitecture values along clusters in two passes:
- * - Copy MIDR to min processor of a cluster, if it doesn't have this information
- * - Copy max frequency to min processor of a clsuter, if it doesn't have this information
- * - Detect vendor and microarchitecture
- * - Copy MIDR, vendor, and microarchitecture to all processors of a cluster, overwriting
- * current values for the processors in the group.
+ * Topology information about some or all logical processors may be unavailable, for the following reasons:
+ * - Linux kernel is too old, or configured without support for topology information in sysfs.
+ * - Core is offline, and Linux kernel is configured to not report topology for offline cores.
+ *
+ * In this case, we assign processors to clusters using two methods:
+ * - Try heuristic cluster configurations (e.g. 6-core SoC usually has 4+2 big.LITTLE configuration).
+ * - If heuristic failed, assign processors to core clusters in a sequential scan.
*/
+ if (!cpuinfo_arm_linux_detect_core_clusters_by_heuristic(usable_processors, arm_linux_processors_count, arm_linux_processors)) {
+ cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(arm_linux_processors_count, arm_linux_processors);
+ }
+ }
- for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
- }
+ cpuinfo_arm_linux_count_cluster_processors(arm_linux_processors_count, arm_linux_processors);
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
- continue;
- }
+#if defined(__ANDROID__)
+ const struct cpuinfo_arm_chipset chipset =
+ cpuinfo_arm_android_decode_chipset(&android_properties, usable_processors, 0);
+#endif
- const uint32_t group_min_processor_id = arm_linux_processors[i].package_group_min;
- if (i != group_min_processor_id) {
- if (!bitmask_all(arm_linux_processors[group_min_processor_id].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
- cpuinfo_log_debug("copied MIDR %08"PRIx32" from processor %"PRIu32" to group min processor %"PRIu32,
- arm_linux_processors[i].midr, i, group_min_processor_id);
- arm_linux_processors[group_min_processor_id].midr = arm_linux_processors[i].midr;
- arm_linux_processors[group_min_processor_id].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
- }
- }
- }
+ const uint32_t cluster_count = cpuinfo_arm_linux_detect_cluster_midr(
+#if defined(__ANDROID__)
+ &chipset,
+#endif
+ arm_linux_processors_count, usable_processors, arm_linux_processors);
- for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- const uint32_t group_min_processor_id = arm_linux_processors[i].package_group_min;
- if (i == group_min_processor_id) {
- /* Decode vendor and uarch only once per cluster */
+ /* Initialize core vendor, uarch, and MIDR for every logical processor */
+ for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
+ if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ const uint32_t cluster_leader = arm_linux_processors[i].package_group_min;
+ if (cluster_leader == i) {
+ /* Cluster leader: decode core vendor and uarch */
cpuinfo_arm_decode_vendor_uarch(
- arm_linux_processors[i].midr,
+ arm_linux_processors[cluster_leader].midr,
#if CPUINFO_ARCH_ARM
- !!(arm_linux_processors[i].features & CPUINFO_ARM_LINUX_FEATURE_VFPV4),
+ !!(arm_linux_processors[cluster_leader].features & CPUINFO_ARM_LINUX_FEATURE_VFPV4),
#endif
- &arm_linux_processors[i].vendor,
- &arm_linux_processors[i].uarch);
+ &arm_linux_processors[cluster_leader].vendor,
+ &arm_linux_processors[cluster_leader].uarch);
} else {
- arm_linux_processors[i].midr = arm_linux_processors[group_min_processor_id].midr;
- arm_linux_processors[i].vendor = arm_linux_processors[group_min_processor_id].vendor;
- arm_linux_processors[i].uarch = arm_linux_processors[group_min_processor_id].uarch;
+ /* Cluster non-leader: copy vendor, uarch, and MIDR from cluster leader */
+ arm_linux_processors[i].flags =
+ (arm_linux_processors[i].flags & ~CPUINFO_ARM_LINUX_VALID_MIDR) |
+ (arm_linux_processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_MIDR);
+ arm_linux_processors[i].midr = arm_linux_processors[cluster_leader].midr;
+ arm_linux_processors[i].vendor = arm_linux_processors[cluster_leader].vendor;
+ arm_linux_processors[i].uarch = arm_linux_processors[cluster_leader].uarch;
}
}
-
}
- /*
- * At this point, we figured out the core clusters. Count the number of cores in each clusters:
- * - In the first pass, for each logical processor increment the count in group-minimum processor.
- * - In the second pass, copy the count from group-minimum processor.
- */
- for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
- }
-
- arm_linux_processors[arm_linux_processors[i].package_group_min].package_processor_count += 1;
- }
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
- if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
- continue;
+ if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+ cpuinfo_log_debug("post-analysis processor %"PRIu32" MIDR 0x%08"PRIx32,
+ i, arm_linux_processors[i].midr);
}
-
- arm_linux_processors[i].package_processor_count =
- arm_linux_processors[arm_linux_processors[i].package_group_min].package_processor_count;
- }
+ }
qsort(arm_linux_processors, arm_linux_processors_count,
sizeof(struct cpuinfo_arm_linux_processor), cmp_x86_processor_by_apic_id);
@@ -480,11 +344,6 @@ void cpuinfo_arm_linux_init(void) {
};
}
-#if defined(__ANDROID__)
- const struct cpuinfo_arm_chipset chipset =
- cpuinfo_arm_android_decode_chipset(&android_properties, usable_processors, 0);
-#endif
-
/*
* Assumptions:
* - No SMP (i.e. each core supports only one hardware thread).
diff --git a/src/arm/linux/midr.c b/src/arm/linux/midr.c
new file mode 100644
index 0000000..9f0d0e5
--- /dev/null
+++ b/src/arm/linux/midr.c
@@ -0,0 +1,762 @@
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cpuinfo.h>
+#include <arm/linux/api.h>
+#if defined(__ANDROID__)
+ #include <arm/android/api.h>
+#endif
+#include <arm/api.h>
+#include <arm/midr.h>
+#include <linux/api.h>
+#include <api.h>
+#include <log.h>
+
+
+#define CPUINFO_COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
+
+#define CLUSTERS_MAX 3
+
+static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
+ return (bitfield & mask) == mask;
+}
+
+/* Description of heterogeneous multi-processing core configuration in a chipset (identified by series and model number) */
+struct hmp_config {
+ /* Number of cores (logical processors) */
+ uint8_t cores;
+ /* ARM chipset series (see cpuinfo_arm_chipset_series enum) */
+ uint8_t series;
+ /* Chipset model number (see cpuinfo_arm_chipset struct) */
+ uint16_t model;
+ /* Number of heterogeneous clusters in the CPU package */
+ uint8_t clusters;
+ /*
+ * Number of cores in each cluster:
+ * - big.LITTLE configurations: [0] = # LITTLE cores, [1] = # big cores
+ * - Max.Med.Min configurations: [0] = # Min cores, [1] = # Med cores, [2] = # Max cores
+ */
+ uint8_t cluster_cores[CLUSTERS_MAX];
+ /*
+ * MIDR of cores in each cluster:
+ * - big.LITTLE configurations: [0] = LITTLE core MIDR, [1] = big core MIDR
+ * - Max.Med.Min configurations: [0] = Min core MIDR, [1] = Med core MIDR, [2] = Max core MIDR
+ */
+ uint32_t cluster_midr[CLUSTERS_MAX];
+};
+
+/*
+ * The list of chipsets with more than one type of cores (i.e. 4x Cortex-A53 + 4x Cortex-A53 is out) and buggy kernels
+ * which report MIDR information only about some cores in /proc/cpuinfo (either only online cores, or only the core
+ * that reads /proc/cpuinfo). On these kernels, it is not possible to detect all core types by just parsing
+ * /proc/cpuinfo, so we use chipset name and this table to find their MIDR (and thus microarchitecture, cache, etc).
+ *
+ * Note: not all chipsets with heterogeneous multiprocessing need an entry in this table. The following HMP
+ * chipsets always list information about all cores in /proc/cpuinfo:
+ *
+ * - Snapdragon 660
+ * - Snapdragon 820 (MSM8996)
+ * - Snapdragon 821 (MSM8996PRO)
+ * - Snapdragon 835 (MSM8998)
+ * - Exynos 8895
+ * - Kirin 960
+ *
+ * As these are all new processors, there is hope that this table won't uncontrollably grow over time.
+ */
+static const struct hmp_config hmp_configs[] = {
+ {
+ /* MSM8956 (Snapdragon 650): 2x Cortex-A72 + 4x Cortex-A53 */
+ .cores = 6,
+ .series = cpuinfo_arm_chipset_series_qualcomm_msm,
+ .model = UINT16_C(8956),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x410FD080),
+ },
+ },
+ {
+ /* MSM8976/MSM8976PRO (Snapdragon 652/653): 4x Cortex-A72 + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_qualcomm_msm,
+ .model = UINT16_C(8976),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x410FD080),
+ },
+ },
+ {
+ /* MSM8992 (Snapdragon 808): 2x Cortex-A57 + 4x Cortex-A53 */
+ .cores = 6,
+ .series = cpuinfo_arm_chipset_series_qualcomm_msm,
+ .model = UINT16_C(8992),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD033),
+ [1] = UINT32_C(0x411FD072),
+ },
+ },
+ {
+ /* MSM8994/MSM8994V (Snapdragon 810): 4x Cortex-A57 + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_qualcomm_msm,
+ .model = UINT16_C(8994),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD032),
+ [1] = UINT32_C(0x411FD071),
+ },
+ },
+#if CPUINFO_ARCH_ARM
+ {
+ /* Exynos 5422: 4x Cortex-A15 + 4x Cortex-A7 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_samsung_exynos,
+ .model = UINT16_C(5422),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FC073),
+ [1] = UINT32_C(0x412FC0F3),
+ },
+ },
+ {
+ /* Exynos 5430: 4x Cortex-A15 + 4x Cortex-A7 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_samsung_exynos,
+ .model = UINT16_C(5430),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FC074),
+ [1] = UINT32_C(0x413FC0F3),
+ },
+ },
+#endif /* CPUINFO_ARCH_ARM */
+ {
+ /* Exynos 5433: 4x Cortex-A57 + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_samsung_exynos,
+ .model = UINT16_C(5433),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD031),
+ [1] = UINT32_C(0x411FD070),
+ },
+ },
+ {
+ /* Exynos 7420: 4x Cortex-A57 + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_samsung_exynos,
+ .model = UINT16_C(7420),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD032),
+ [1] = UINT32_C(0x411FD070),
+ },
+ },
+ {
+ /* Exynos 8890: 4x Mongoose + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_samsung_exynos,
+ .model = UINT16_C(8890),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x531F0011),
+ },
+ },
+#if CPUINFO_ARCH_ARM
+ {
+ /* Kirin 920: 4x Cortex-A15 + 4x Cortex-A7. NOTE(review): .series below is samsung_exynos, but Kirin is a HiSilicon chipset (compare the Kirin 925 entry) — likely should be cpuinfo_arm_chipset_series_hisilicon_kirin; verify */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_samsung_exynos,
+ .model = UINT16_C(920),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FC070) /* TODO: clarify */,
+ [1] = UINT32_C(0x410FC0F0) /* TODO: clarify */,
+ },
+ },
+ {
+ /* Kirin 925: 4x Cortex-A15 + 4x Cortex-A7 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
+ .model = UINT16_C(925),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FC070) /* TODO: clarify */,
+ [1] = UINT32_C(0x410FC0F0) /* TODO: clarify */,
+ },
+ },
+#endif /* CPUINFO_ARCH_ARM */
+ {
+ /* Kirin 950: 4x Cortex-A72 + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
+ .model = UINT16_C(950),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x410FD080),
+ },
+ },
+ {
+ /* Kirin 955: 4x Cortex-A72 + 4x Cortex-A53 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_hisilicon_kirin,
+ .model = UINT16_C(955),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x410FD080),
+ },
+ },
+#if CPUINFO_ARCH_ARM
+ {
+ /* MediaTek MT8135: 2x Cortex-A7 + 2x Cortex-A15 */
+ .cores = 4,
+ .series = cpuinfo_arm_chipset_series_mediatek_mt,
+ .model = UINT16_C(8135),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 2,
+ [1] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FC073),
+ [1] = UINT32_C(0x413FC0F2),
+ },
+ },
+#endif
+ {
+ /* MediaTek MT8173: 2x Cortex-A72 + 2x Cortex-A53 */
+ .cores = 4,
+ .series = cpuinfo_arm_chipset_series_mediatek_mt,
+ .model = UINT16_C(8173),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 2,
+ [1] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD032),
+ [1] = UINT32_C(0x410FD080),
+ },
+ },
+ {
+ /* MediaTek MT8176: 2x Cortex-A72 + 4x Cortex-A53 */
+ .cores = 6,
+ .series = cpuinfo_arm_chipset_series_mediatek_mt,
+ .model = UINT16_C(8176),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD032),
+ [1] = UINT32_C(0x410FD080),
+ },
+ },
+#if CPUINFO_ARCH_ARM
+ {
+ /* MediaTek MT6595: 4x Cortex-A17 + 4x Cortex-A7 */
+ .cores = 8,
+ .series = cpuinfo_arm_chipset_series_mediatek_mt,
+ .model = UINT16_C(6595),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FC075),
+ [1] = UINT32_C(0x410FC0E0),
+ },
+ },
+#endif
+ {
+ /* MediaTek MT6797: 2x Cortex-A72 + 4x Cortex-A53 + 4x Cortex-A53 */
+ .cores = 10,
+ .series = cpuinfo_arm_chipset_series_mediatek_mt,
+ .model = UINT16_C(6797),
+ .clusters = 3,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ [2] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x410FD034),
+ [2] = UINT32_C(0x410FD081),
+ },
+ },
+ {
+ /* MediaTek MT6799: 2x Cortex-A73 + 4x Cortex-A53 + 4x Cortex-A35 */
+ .cores = 10,
+ .series = cpuinfo_arm_chipset_series_mediatek_mt,
+ .model = UINT16_C(6799),
+ .clusters = 3,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 4,
+ [2] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD041),
+ [1] = UINT32_C(0x410FD034),
+ [2] = UINT32_C(0x410FD092),
+ },
+ },
+ {
+ /* Rockchip RK3399: 2x Cortex-A72 + 4x Cortex-A53 */
+ .cores = 6,
+ .series = cpuinfo_arm_chipset_series_rockchip_rk,
+ .model = UINT16_C(3399),
+ .clusters = 2,
+ .cluster_cores = {
+ [0] = 4,
+ [1] = 2,
+ },
+ .cluster_midr = {
+ [0] = UINT32_C(0x410FD034),
+ [1] = UINT32_C(0x410FD082),
+ },
+ },
+};
+
+#if defined(__ANDROID__)
+/*
+ * Searches for the chipset name in the mapping of chipset names to cores' MIDR values. If a match is found, initializes
+ * MIDR for all clusters' leaders with tabulated values.
+ *
+ * @param[in] chipset - chipset (SoC) name information.
+ * @param clusters_count - number of CPU core clusters detected in the SoC.
+ * @param cluster_leaders - indices of core clusters' leaders in the @p processors array.
+ * @param processors_count - number of usable logical processors in the system.
+ * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
+ * and decoded core cluster (package_group_min) information.
+ * Upon successful return, processors[i].midr for all clusters' leaders contains the
+ * tabulated MIDR values.
+ * @param verify_midr - indicates whether the function should check that the MIDR values to be assigned to leaders of
+ * core clusters are consistent with known parts of their parsed values.
+ * Set it to false if the only MIDR value parsed from /proc/cpuinfo is for the last processor
+ * reported in /proc/cpuinfo and thus can't be unambiguously attributed to that processor.
+ *
+ * @retval true if the chipset was found in the mapping and core clusters' leaders initialized with MIDR values.
+ * @retval false if the chipset was not found in the mapping, or any consistency check failed.
+ */
+static bool cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
+ const struct cpuinfo_arm_chipset chipset[restrict static 1],
+ uint32_t clusters_count,
+ const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],
+ uint32_t processors_count,
+ struct cpuinfo_arm_linux_processor processors[restrict static processors_count],
+ bool verify_midr)
+{
+ if (clusters_count <= CLUSTERS_MAX) {
+ for (uint32_t c = 0; c < CPUINFO_COUNT_OF(hmp_configs); c++) {
+ if (hmp_configs[c].model == chipset->model && hmp_configs[c].series == chipset->series) {
+ /* Verify that the total number of cores and clusters of cores matches expectation */
+ if (hmp_configs[c].cores != processors_count || hmp_configs[c].clusters != clusters_count) {
+ return false;
+ }
+
+ /* Verify that core cluster configuration matches expectation */
+ for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
+ const uint32_t cluster_leader = cluster_leaders[cluster];
+ if (hmp_configs[c].cluster_cores[cluster] != processors[cluster_leader].package_processor_count) {
+ return false;
+ }
+ }
+
+ if (verify_midr) {
+ /* Verify known parts of MIDR */
+ for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
+ const uint32_t cluster_leader = cluster_leaders[cluster];
+
+ /* Create a mask of known midr bits */
+ uint32_t midr_mask = 0;
+ if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+ midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
+ }
+ if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+ midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK;
+ }
+ if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) {
+ midr_mask |= CPUINFO_ARM_MIDR_PART_MASK;
+ }
+ if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+ midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK;
+ }
+
+ /* Verify the bits under the mask */
+ if ((processors[cluster_leader].midr ^ hmp_configs[c].cluster_midr[cluster]) & midr_mask) {
+ cpuinfo_log_debug("parsed MIDR of cluster %08"PRIu32" does not match tabulated value %08"PRIu32,
+ processors[cluster_leader].midr, hmp_configs[c].cluster_midr[cluster]);
+ return false;
+ }
+ }
+ }
+
+ /* Assign MIDRs according to tabulated configurations */
+ for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
+ const uint32_t cluster_leader = cluster_leaders[cluster];
+ processors[cluster_leader].midr = hmp_configs[c].cluster_midr[cluster];
+ processors[cluster_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
+ cpuinfo_log_debug("cluster %"PRIu32" MIDR = 0x%08"PRIx32, cluster, hmp_configs[c].cluster_midr[cluster]);
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+#endif
+
+/*
+ * Initializes MIDR for leaders of core clusters using a heuristic for big.LITTLE systems:
+ * - If the only known MIDR is for the big core cluster, guess the matching MIDR for the LITTLE cluster.
+ * - Estimate which of the clusters is big using maximum frequency, if known, otherwise using system processor ID.
+ * - Initialize the MIDR for big and LITTLE core clusters using the guesstimated values.
+ *
+ * @param clusters_count - number of CPU core clusters detected in the SoC.
+ * @param cluster_with_midr_count - number of CPU core clusters in the SoC with known MIDR values.
+ * @param last_processor_with_midr - index of the last logical processor with known MIDR in the @p processors array.
+ * @param cluster_leaders - indices of core clusters' leaders in the @p processors array.
+ * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
+ * and decoded core cluster (package_group_min) information.
+ * Upon successful return, processors[i].midr for all core clusters' leaders contains
+ * the heuristically detected MIDR value.
+ * @param verify_midr - indicates whether the function should check that the MIDR values to be assigned to leaders of
+ * core clusters are consistent with known parts of their parsed values.
+ * Set it to false if the only MIDR value parsed from /proc/cpuinfo is for the last processor
+ * reported in /proc/cpuinfo and thus can't be unambiguously attributed to that processor.
+ *
+ * @retval true if this is a big.LITTLE system with only one known MIDR and the CPU core clusters' leaders were
+ * initialized with MIDR values.
+ * @retval false if this is not a big.LITTLE system.
+ */
+static bool cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
+	uint32_t clusters_count,
+	uint32_t cluster_with_midr_count,
+	uint32_t last_processor_with_midr,
+	const uint32_t cluster_leaders[restrict static CLUSTERS_MAX],
+	struct cpuinfo_arm_linux_processor processors[restrict static last_processor_with_midr],
+	bool verify_midr)
+{
+	if (clusters_count != 2 || cluster_with_midr_count != 1) {
+		/* Not a big.LITTLE system, or MIDR is known for both/neither clusters */
+		return false;
+	}
+
+	/*
+	 * The single parsed MIDR belongs to the cluster of the last processor with MIDR;
+	 * assume it describes the big cluster, and derive the LITTLE cluster's MIDR from it.
+	 */
+	const uint32_t midr_flags =
+		(processors[processors[last_processor_with_midr].package_group_min].flags & CPUINFO_ARM_LINUX_VALID_MIDR);
+	const uint32_t big_midr = processors[processors[last_processor_with_midr].package_group_min].midr;
+	const uint32_t little_midr = midr_little_core_for_big(big_midr);
+
+	/* Default assumption: the first reported cluster is LITTLE cluster (this holds on most Linux kernels) */
+	uint32_t little_cluster_leader = cluster_leaders[0];
+	const uint32_t other_cluster_leader = cluster_leaders[1];
+	/* If maximum frequency is known for both clusters, assume LITTLE cluster is the one with lower frequency */
+	if (processors[little_cluster_leader].flags & processors[other_cluster_leader].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+		if (processors[little_cluster_leader].max_frequency > processors[other_cluster_leader].max_frequency) {
+			little_cluster_leader = other_cluster_leader;
+		}
+	}
+
+	if (verify_midr) {
+		/* Verify known parts of MIDR */
+		for (uint32_t cluster = 0; cluster < clusters_count; cluster++) {
+			const uint32_t cluster_leader = cluster_leaders[cluster];
+
+			/* Create a mask of known midr bits */
+			uint32_t midr_mask = 0;
+			if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
+				midr_mask |= CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
+			}
+			if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
+				midr_mask |= CPUINFO_ARM_MIDR_VARIANT_MASK;
+			}
+			if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_PART) {
+				midr_mask |= CPUINFO_ARM_MIDR_PART_MASK;
+			}
+			if (processors[cluster_leader].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
+				midr_mask |= CPUINFO_ARM_MIDR_REVISION_MASK;
+			}
+
+			/* Verify the bits under the mask */
+			const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr;
+			if ((processors[cluster_leader].midr ^ midr) & midr_mask) {
+				/* Log MIDR values in zero-padded hex (PRIx32), consistent with the assignment log below */
+				cpuinfo_log_debug(
+					"parsed MIDR %08"PRIx32" of cluster leader %"PRIu32" is inconsistent with expected value %08"PRIx32,
+					processors[cluster_leader].midr, cluster_leader, midr);
+				return false;
+			}
+		}
+	}
+
+	for (uint32_t c = 0; c < clusters_count; c++) {
+		/* Skip cluster with already assigned MIDR */
+		const uint32_t cluster_leader = cluster_leaders[c];
+		if (bitmask_all(processors[cluster_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
+			continue;
+		}
+
+		const uint32_t midr = (cluster_leader == little_cluster_leader) ? little_midr : big_midr;
+		cpuinfo_log_info("assume processor %"PRIu32" to have MIDR %08"PRIx32, cluster_leader, midr);
+		/* To be consistent, we copy the MIDR entirely, rather than by parts */
+		processors[cluster_leader].midr = midr;
+		processors[cluster_leader].flags |= midr_flags;
+	}
+	return true;
+}
+
+/*
+ * Initializes MIDR for leaders of core clusters in a single sequential scan:
+ * - Clusters preceding the first reported MIDR value are assumed to have the default MIDR value.
+ * - Clusters following any reported MIDR value are assumed to have that MIDR value.
+ *
+ * @param default_midr - MIDR value that will be assigned to cluster leaders preceding any reported MIDR value.
+ * @param processors_count - number of logical processor descriptions in the @p processors array.
+ * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
+ * and decoded core cluster (package_group_min) information.
+ * Upon successful return, processors[i].midr for all core clusters' leaders contains
+ * the assigned MIDR value.
+ */
+static void cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
+	uint32_t default_midr,
+	uint32_t processors_count,
+	struct cpuinfo_arm_linux_processor processors[restrict static processors_count])
+{
+	/* Running MIDR value: starts at the default and is updated at each cluster leader with a parsed MIDR */
+	uint32_t current_midr = default_midr;
+	for (uint32_t processor = 0; processor < processors_count; processor++) {
+		/* Consider only usable (present & possible) processors */
+		if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_MASK_USABLE)) {
+			continue;
+		}
+		/* Only cluster leaders (lowest processor ID in the cluster) are assigned MIDR values */
+		if (processors[processor].package_group_min != processor) {
+			continue;
+		}
+		if (bitmask_all(processors[processor].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
+			/* Leader has a parsed MIDR: it becomes the value propagated to subsequent clusters */
+			current_midr = processors[processor].midr;
+		} else {
+			cpuinfo_log_info("assume processor %"PRIu32" to have MIDR %08"PRIx32, processor, current_midr);
+			/* To be consistent, we copy the MIDR entirely, rather than by parts */
+			processors[processor].midr = current_midr;
+			processors[processor].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
+		}
+	}
+}
+
+/*
+ * Detects MIDR of each CPU core clusters' leader.
+ *
+ * @param[in] chipset - chipset (SoC) name information.
+ * @param max_processors - number of processor descriptions in the @p processors array.
+ * @param usable_processors - number of processor descriptions in the @p processors array with both POSSIBLE and
+ * PRESENT flags.
+ * @param[in,out] processors - array of logical processor descriptions with pre-parsed MIDR, maximum frequency,
+ * and decoded core cluster (package_group_min) information.
+ * Upon return, processors[i].midr for all clusters' leaders contains the MIDR value.
+ *
+ * @returns The number of core clusters
+ */
+uint32_t cpuinfo_arm_linux_detect_cluster_midr(
+#if defined(__ANDROID__)
+	const struct cpuinfo_arm_chipset chipset[restrict static 1],
+#endif
+	uint32_t max_processors,
+	uint32_t usable_processors,
+	struct cpuinfo_arm_linux_processor processors[restrict static max_processors])
+{
+	uint32_t clusters_count = 0;
+	uint32_t cluster_leaders[CLUSTERS_MAX];
+	uint32_t last_processor_in_cpuinfo = max_processors;
+	uint32_t last_processor_with_midr = max_processors;
+	uint32_t processors_with_midr_count = 0;
+	/* Single pass: count clusters, record their leaders, and pull per-processor info up to cluster leaders */
+	for (uint32_t i = 0; i < max_processors; i++) {
+		if (bitmask_all(processors[i].flags, CPUINFO_LINUX_MASK_USABLE)) {
+			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR) {
+				last_processor_in_cpuinfo = i;
+			}
+			if (bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_IMPLEMENTER | CPUINFO_ARM_LINUX_VALID_PART)) {
+				last_processor_with_midr = i;
+				processors_with_midr_count += 1;
+			}
+			const uint32_t group_leader = processors[i].package_group_min;
+			if (group_leader == i) {
+				/* Processor is its own cluster leader */
+				if (clusters_count < CLUSTERS_MAX) {
+					cluster_leaders[clusters_count] = i;
+				}
+				clusters_count += 1;
+			} else {
+				/* Copy known bits of information to cluster leader */
+
+				if ((processors[i].flags & ~processors[group_leader].flags) & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
+					processors[group_leader].max_frequency = processors[i].max_frequency;
+					processors[group_leader].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
+				}
+				if (!bitmask_all(processors[group_leader].flags, CPUINFO_ARM_LINUX_VALID_MIDR) &&
+					bitmask_all(processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR))
+				{
+					processors[group_leader].midr = processors[i].midr;
+					processors[group_leader].flags |= CPUINFO_ARM_LINUX_VALID_MIDR;
+				}
+			}
+		}
+	}
+	cpuinfo_log_debug("detected %"PRIu32" core clusters", clusters_count);
+
+	/*
+	 * Two relations between reported /proc/cpuinfo information, and cores is possible:
+	 * - /proc/cpuinfo reports information for all or some of the cores below the corresponding
+	 *   "processor : <number>" lines. Information on offline cores may be missing.
+	 * - /proc/cpuinfo reports information only once, after all "processor : <number>" lines.
+	 *   The reported information may relate to processor #0 or to the processor which
+	 *   executed the system calls to read /proc/cpuinfo. It is also indistinguishable
+	 *   from /proc/cpuinfo reporting information only for the last core (e.g. if all other
+	 *   cores are offline).
+	 *
+	 * We detect the second case by checking if /proc/cpuinfo contains valid MIDR only for one,
+	 * last reported, processor. Note, that the last reported core may be not the last
+	 * present & possible processor, as /proc/cpuinfo may not report high-index offline cores.
+	 */
+	if (processors_with_midr_count == 1 && last_processor_in_cpuinfo == last_processor_with_midr && clusters_count > 1) {
+		/*
+		 * There are multiple core clusters, but /proc/cpuinfo reported MIDR only for one
+		 * processor, and we don't even know which logical processor this information refers to.
+		 *
+		 * We make three attempts to detect MIDR for all clusters:
+		 * 1. Search tabulated MIDR values for chipsets which have heterogeneous clusters and ship with Linux
+		 *    kernels which do not always report all cores in /proc/cpuinfo. If found, use the tabulated values.
+		 * 2. For systems with 2 clusters and MIDR known for one cluster, assume big.LITTLE configuration,
+		 *    and estimate MIDR for the other cluster under assumption that MIDR for the big cluster is known.
+		 * 3. Initialize MIDRs for all core clusters to the only parsed MIDR value.
+		 */
+		cpuinfo_log_debug("the only reported MIDR can not be attributed to a particular processor");
+
+		#if defined(__ANDROID__)
+		if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
+			chipset, clusters_count, cluster_leaders, usable_processors, processors, false))
+		{
+			return clusters_count;
+		}
+		#endif
+
+		/* Try big.LITTLE heuristic */
+		if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
+			clusters_count, 1, last_processor_with_midr,
+			cluster_leaders, processors, false))
+		{
+			return clusters_count;
+		}
+
+		/* Fall back to sequential initialization of MIDR values for core clusters */
+		cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
+			processors[processors[last_processor_with_midr].package_group_min].midr,
+			max_processors, processors);
+	} else if (processors_with_midr_count < usable_processors) {
+		/*
+		 * /proc/cpuinfo reported MIDR only for some processors, and probably some core clusters do not have MIDR
+		 * for any of the cores. Check if this is the case.
+		 */
+		uint32_t clusters_with_midr_count = 0;
+		for (uint32_t i = 0; i < max_processors; i++) {
+			if (bitmask_all(processors[i].flags, CPUINFO_LINUX_MASK_USABLE | CPUINFO_ARM_LINUX_VALID_MIDR)) {
+				if (processors[i].package_group_min == i) {
+					clusters_with_midr_count += 1;
+				}
+			}
+		}
+
+		if (clusters_with_midr_count < clusters_count) {
+			/*
+			 * /proc/cpuinfo reported MIDR only for some clusters, need to reconstruct others.
+			 * We make three attempts to detect MIDR for clusters without it:
+			 * 1. Search tabulated MIDR values for chipsets which have heterogeneous clusters and ship with Linux
+			 *    kernels which do not always report all cores in /proc/cpuinfo. If found, use the tabulated values.
+			 * 2. For systems with 2 clusters and MIDR known for one cluster, assume big.LITTLE configuration,
+			 *    and estimate MIDR for the other cluster under assumption that MIDR for the big cluster is known.
+			 * 3. Initialize MIDRs for core clusters in a single sequential scan:
+			 *    - Clusters preceding the first reported MIDR value are assumed to have the last reported MIDR value.
+			 *    - Clusters following any reported MIDR value are assumed to have that MIDR value.
+			 */
+
+			/* The chipset tables (and the chipset parameter itself) exist only on Android builds */
+			#if defined(__ANDROID__)
+			if (cpuinfo_arm_linux_detect_cluster_midr_by_chipset(
+				chipset, clusters_count, cluster_leaders, usable_processors, processors, true))
+			{
+				return clusters_count;
+			}
+			#endif
+
+			if (last_processor_with_midr != max_processors) {
+				/* Try big.LITTLE heuristic */
+				if (cpuinfo_arm_linux_detect_cluster_midr_by_big_little_heuristic(
+					clusters_count, processors_with_midr_count, last_processor_with_midr,
+					cluster_leaders, processors, true))
+				{
+					return clusters_count;
+				}
+
+				/* Fall back to sequential initialization of MIDR values for core clusters */
+				cpuinfo_arm_linux_detect_cluster_midr_by_sequential_scan(
+					processors[processors[last_processor_with_midr].package_group_min].midr,
+					max_processors, processors);
+			}
+		}
+	}
+	return clusters_count;
+}