Diffstat (limited to 'mali_kbase/mali_kbase_defs.h')
-rw-r--r--  mali_kbase/mali_kbase_defs.h  194
1 file changed, 142 insertions(+), 52 deletions(-)
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index d8f6f75..df1a6f0 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -34,6 +34,7 @@
#include <mali_kbase_mmu_hw.h>
#include <mali_kbase_mmu_mode.h>
#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_pm.h>
#include <linux/atomic.h>
#include <linux/mempool.h>
@@ -229,6 +230,39 @@ struct kbase_jd_atom_dependency {
};
/**
+ * struct kbase_io_access - holds information about one register access
+ *
+ * @addr: register address; the lowest bit encodes the access direction
+ *        (0 = read, 1 = write)
+ * @value: value written or read
+ */
+struct kbase_io_access {
+ uintptr_t addr;
+ u32 value;
+};
+
+/**
+ * struct kbase_io_history - keeps track of all recent register accesses
+ *
+ * @enabled: true if register accesses are recorded, false otherwise
+ * @lock: spinlock protecting kbase_io_access array
+ * @count: number of register accesses recorded so far
+ * @size: number of elements in kbase_io_access array
+ * @buf: array of kbase_io_access
+ */
+struct kbase_io_history {
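+	/* bool on v4.4+, u32 before then; older kernels' debugfs_create_bool()
+	 * expected a u32 * (likely the same "bool stored as a u32 per Linux
+	 * API" issue noted in the comment removed from kbase_context below) */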
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ bool enabled;
+#else
+ u32 enabled;
+#endif
+
+ spinlock_t lock;
+ size_t count;
+ u16 size;
+ struct kbase_io_access *buf;
+};
+
+/**
* @brief The function retrieves a read-only reference to the atom field from
* the kbase_jd_atom_dependency structure
*
@@ -302,15 +336,17 @@ enum kbase_atom_gpu_rb_state {
KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB,
/* Atom is in slot ringbuffer but is blocked on a previous atom */
KBASE_ATOM_GPU_RB_WAITING_BLOCKED,
- /* Atom is in slot ringbuffer but is waiting for proected mode exit */
- KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_EXIT,
+ /* Atom is in slot ringbuffer but is waiting for a previous protected
+ * mode transition to complete */
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
+ /* Atom is in slot ringbuffer but is waiting for a protected mode
+ * transition */
+ KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
/* Atom is in slot ringbuffer but is waiting for cores to become
* available */
KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
/* Atom is in slot ringbuffer but is blocked on affinity */
KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
- /* Atom is in slot ringbuffer but is waiting for protected mode entry */
- KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_ENTRY,
/* Atom is in slot ringbuffer and ready to run */
KBASE_ATOM_GPU_RB_READY,
/* Atom is in slot ringbuffer and has been submitted to the GPU */
@@ -320,20 +356,41 @@ enum kbase_atom_gpu_rb_state {
KBASE_ATOM_GPU_RB_RETURN_TO_JS
};
+enum kbase_atom_enter_protected_state {
+ /*
+ * Starting state:
+ * Check if a transition into protected mode is required.
+ *
+ * NOTE: The integer value of this must
+ * match KBASE_ATOM_EXIT_PROTECTED_CHECK.
+ */
+ KBASE_ATOM_ENTER_PROTECTED_CHECK = 0,
+ /* Wait for vinstr to suspend. */
+ KBASE_ATOM_ENTER_PROTECTED_VINSTR,
+ /* Wait for the L2 to become idle in preparation for
+ * the coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2,
+ /* End state;
+ * Prepare coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED,
+};
+
enum kbase_atom_exit_protected_state {
/*
* Starting state:
* Check if a transition out of protected mode is required.
+ *
+ * NOTE: The integer value of this must
+ * match KBASE_ATOM_ENTER_PROTECTED_CHECK.
*/
- KBASE_ATOM_EXIT_PROTECTED_CHECK,
- /* Wait for the L2 to become idle in preparation for the reset. */
+ KBASE_ATOM_EXIT_PROTECTED_CHECK = 0,
+ /* Wait for the L2 to become idle in preparation
+ * for the reset. */
KBASE_ATOM_EXIT_PROTECTED_IDLE_L2,
/* Issue the protected reset. */
KBASE_ATOM_EXIT_PROTECTED_RESET,
- /*
- * End state;
- * Wait for the reset to complete.
- */
+ /* End state;
+ * Wait for the reset to complete. */
KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT,
};
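The NOTEs on the two CHECK states can be enforced at build time; a sketch under the assumption that a check like this is placed in some .c file (the wrapper function is invented for illustration):

    static inline void kbase_protected_state_check(void)
    {
	/* The two state machines overlay each other in a union (see the
	 * next hunk), so their starting values must coincide. */
	BUILD_BUG_ON((int)KBASE_ATOM_ENTER_PROTECTED_CHECK !=
		     (int)KBASE_ATOM_EXIT_PROTECTED_CHECK);
    }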
@@ -498,7 +555,13 @@ struct kbase_jd_atom {
* event_code when the atom is processed. */
enum base_jd_event_code will_fail_event_code;
- enum kbase_atom_exit_protected_state exit_protected_state;
+ /* Atoms will only ever be transitioning into, or out of, protected
+ * mode, so we do not need two separate fields.
+ */
+ union {
+ enum kbase_atom_enter_protected_state enter;
+ enum kbase_atom_exit_protected_state exit;
+ } protected_state;
struct rb_node runnable_tree_node;
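Because the two CHECK values coincide, code that finishes either transition can reset the shared state through whichever union member is convenient; a hedged sketch, with katom being a struct kbase_jd_atom pointer:

    /* Equivalent resets: both store 0 into the same union, leaving the
     * atom ready for the next enter-or-exit decision. */
    katom->protected_state.enter = KBASE_ATOM_ENTER_PROTECTED_CHECK;
    katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;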
@@ -601,19 +664,19 @@ struct kbase_as {
struct work_struct work_pagefault;
struct work_struct work_busfault;
enum kbase_mmu_fault_type fault_type;
+ bool protected_mode;
u32 fault_status;
u64 fault_addr;
u64 fault_extra_addr;
- struct mutex transaction_mutex;
struct kbase_mmu_setup current_setup;
/* BASE_HW_ISSUE_8316 */
struct workqueue_struct *poke_wq;
struct work_struct poke_work;
- /** Protected by kbasep_js_device_data::runpool_irq::lock */
+ /** Protected by hwaccess_lock */
int poke_refcount;
- /** Protected by kbasep_js_device_data::runpool_irq::lock */
+ /** Protected by hwaccess_lock */
kbase_as_poke_state poke_state;
struct hrtimer poke_timer;
};
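The per-address-space transaction_mutex goes away here; MMU register programming is presumably serialized instead by the device-wide mmu_hw_mutex added further down in this patch. A sketch of the implied pattern (the body is illustrative):

    mutex_lock(&kbdev->mmu_hw_mutex);
    /* program as->current_setup into the MMU registers for this AS */
    mutex_unlock(&kbdev->mmu_hw_mutex);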
@@ -734,8 +797,7 @@ struct kbase_trace_kbdev_timeline {
* But it's kept as an example of how to add global timeline tracking
* information
*
- * The caller must hold kbasep_js_device_data::runpool_irq::lock when
- * accessing this */
+ * The caller must hold hwaccess_lock when accessing this */
u8 slot_atoms_submitted[BASE_JM_MAX_NR_SLOTS];
/* Last UID for each PM event */
@@ -744,7 +806,7 @@ struct kbase_trace_kbdev_timeline {
atomic_t pm_event_uid_counter;
/*
* L2 transition state - true indicates that the transition is ongoing
- * Expected to be protected by pm.power_change_lock */
+ * Expected to be protected by hwaccess_lock */
bool l2_transitioning;
};
#endif /* CONFIG_MALI_TRACE_TIMELINE */
@@ -786,19 +848,6 @@ struct kbase_pm_device_data {
u64 debug_core_mask_all;
/**
- * Lock protecting the power state of the device.
- *
- * This lock must be held when accessing the shader_available_bitmap,
- * tiler_available_bitmap, l2_available_bitmap, shader_inuse_bitmap and
- * tiler_inuse_bitmap fields of kbase_device, and the ca_in_transition
- * and shader_poweroff_pending fields of kbase_pm_device_data. It is
- * also held when the hardware power registers are being written to, to
- * ensure that two threads do not conflict over the power transitions
- * that the hardware should make.
- */
- spinlock_t power_change_lock;
-
- /**
* Callback for initializing the runtime power management.
*
* @param kbdev The kbase device
@@ -1108,6 +1157,11 @@ struct kbase_device {
/* Total number of created contexts */
atomic_t ctx_num;
+#ifdef CONFIG_DEBUG_FS
+ /* Holds the most recent register accesses */
+ struct kbase_io_history io_history;
+#endif /* CONFIG_DEBUG_FS */
+
struct kbase_hwaccess_data hwaccess;
/* Count of page/bus faults waiting for workqueues to process */
@@ -1125,6 +1179,8 @@ struct kbase_device {
#endif
size_t mem_pool_max_size_default;
+ /* current gpu coherency mode */
+ u32 current_gpu_coherency_mode;
/* system coherency mode */
u32 system_coherency;
/* Flag to track when cci snoops have been enabled on the interface */
@@ -1171,6 +1227,11 @@ struct kbase_device {
/* list of inited sub systems. Used during terminate/error recovery */
u32 inited_subsys;
+
+ spinlock_t hwaccess_lock;
+
+ /* Protects access to MMU operations */
+ struct mutex mmu_hw_mutex;
};
/**
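Several hunks in this patch fold kbasep_js_device_data::runpool_irq::lock and pm.power_change_lock into this single hwaccess_lock. A minimal sketch of the expected usage, in IRQ-safe form since the old runpool lock was taken from interrupt context (the field touched is illustrative):

    unsigned long flags;

    spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    /* poke_refcount, poke_state, as_nr and the PM state documented
     * above are now all guarded by this one lock */
    kbdev->as[i].poke_refcount++;
    spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);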
@@ -1181,7 +1242,7 @@ struct kbase_device {
* dependencies. Atoms on this list will be moved to the
* runnable_tree when the blocking atom completes.
*
- * runpool_irq.lock must be held when accessing this structure.
+ * hwaccess_lock must be held when accessing this structure.
*/
struct jsctx_queue {
struct rb_root runnable_tree;
@@ -1193,6 +1254,52 @@ struct jsctx_queue {
(((minor) & 0xFFF) << 8) | \
((0 & 0xFF) << 0))
+/**
+ * enum kbase_context_flags - Flags for kbase contexts
+ *
+ * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
+ * process on a 64-bit kernel.
+ *
+ * @KCTX_RUNNABLE_REF: Set when context is counted in
+ * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
+ *
+ * @KCTX_ACTIVE: Set when the context is active.
+ *
+ * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
+ * context.
+ *
+ * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
+ * initialized.
+ *
+ * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
+ * allocations. Existing allocations will not change.
+ *
+ * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
+ *
+ * @KCTX_PRIVILEGED: Set if the context uses an address space and should be
+ * kept scheduled in.
+ *
+ * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
+ * This is only ever updated whilst the jsctx_mutex is held.
+ *
+ * @KCTX_DYING: Set when the context is in the process of being evicted.
+ *
+ * All members need to be separate bits. This enum is intended for use in a
+ * bitmask where multiple values get OR-ed together.
+ */
+enum kbase_context_flags {
+ KCTX_COMPAT = 1U << 0,
+ KCTX_RUNNABLE_REF = 1U << 1,
+ KCTX_ACTIVE = 1U << 2,
+ KCTX_PULLED = 1U << 3,
+ KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
+ KCTX_INFINITE_CACHE = 1U << 5,
+ KCTX_SUBMIT_DISABLED = 1U << 6,
+ KCTX_PRIVILEGED = 1U << 7,
+ KCTX_SCHEDULED = 1U << 8,
+ KCTX_DYING = 1U << 9,
+};
+
struct kbase_context {
struct file *filp;
struct kbase_device *kbdev;
@@ -1207,7 +1314,7 @@ struct kbase_context {
atomic_t event_count;
int event_coalesce_count;
- bool is_compat;
+ atomic_t flags;
atomic_t setup_complete;
atomic_t setup_in_progress;
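With the one-bool-per-field layout (is_compat, pulled, infinite_cache_active, ctx_active, ctx_runnable_ref, mem_profile_initialized) replaced by a single atomic flags word, accesses go through small helpers; a sketch assuming the kernel's atomic_or()/atomic_andnot() (the helper names are invented, the driver's real accessors may differ):

    static inline bool kctx_flag(struct kbase_context *kctx,
				 enum kbase_context_flags flag)
    {
	return atomic_read(&kctx->flags) & flag;
    }

    static inline void kctx_flag_set(struct kbase_context *kctx,
				     enum kbase_context_flags flag)
    {
	atomic_or(flag, &kctx->flags);	/* e.g. KCTX_SUBMIT_DISABLED */
    }

    static inline void kctx_flag_clear(struct kbase_context *kctx,
				       enum kbase_context_flags flag)
    {
	atomic_andnot(flag, &kctx->flags);
    }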
@@ -1251,12 +1358,11 @@ struct kbase_context {
/** This is effectively part of the Run Pool, because it only has a valid
* setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
*
- * The kbasep_js_device_data::runpool_irq::lock must be held whilst accessing
- * this.
+ * The hwaccess_lock must be held whilst accessing this.
*
* If the context relating to this as_nr is required, you must use
* kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
- * whilst you're using it. Alternatively, just hold the kbasep_js_device_data::runpool_irq::lock
+ * whilst you're using it. Alternatively, just hold the hwaccess_lock
* to ensure the context doesn't disappear (but this has restrictions on what other locks
* you can take whilst doing this) */
int as_nr;
@@ -1282,8 +1388,7 @@ struct kbase_context {
size_t mem_profile_size;
/* Mutex guarding memory profile state */
struct mutex mem_profile_lock;
- /* Memory profile file created */
- bool mem_profile_initialized;
+ /* Memory profile directory under debugfs */
struct dentry *kctx_dentry;
/* for job fault debug */
@@ -1303,15 +1408,6 @@ struct kbase_context {
atomic_t atoms_pulled;
/* Number of atoms currently pulled from this context, per slot */
atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
- /* true if last kick() caused atoms to be pulled from this context */
- bool pulled;
- /* true if infinite cache is to be enabled for new allocations. Existing
- * allocations will not change. bool stored as a u32 per Linux API */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
- bool infinite_cache_active;
-#else
- u32 infinite_cache_active;
-#endif
/* Bitmask of slots that can be pulled from */
u32 slots_pullable;
@@ -1325,17 +1421,11 @@ struct kbase_context {
struct kbase_vinstr_client *vinstr_cli;
struct mutex vinstr_cli_lock;
- /* Must hold queue_mutex when accessing */
- bool ctx_active;
-
/* List of completed jobs waiting for events to be posted */
struct list_head completed_jobs;
/* Number of work items currently pending on job_done_wq */
atomic_t work_count;
- /* true if context is counted in kbdev->js_data.nr_contexts_runnable */
- bool ctx_runnable_ref;
-
/* Waiting soft-jobs will fail when this timer expires */
struct timer_list soft_job_timeout;