summaryrefslogtreecommitdiff
path: root/gxp-internal.h
blob: 5671c0de6f789bd1203bee89ad44d7db2c615ad8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * GXP driver common internal definitions.
 *
 * Copyright (C) 2021 Google LLC
 */
#ifndef __GXP_INTERNAL_H__
#define __GXP_INTERNAL_H__

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

#include "gxp-config.h"

/* Holds Client's TPU mailboxes info used during mapping */
/* Holds Client's TPU mailboxes info used during mapping */
struct gxp_tpu_mbx_desc {
	uint phys_core_list;	/* presumably a bitmask of physical DSP cores — confirm with callers */
	uint virt_core_list;	/* presumably a bitmask of virtual cores — confirm with callers */
	size_t cmdq_size, respq_size;	/* sizes of the TPU command and response queues, in bytes */
};

/* ioremapped resource */
/*
 * An ioremapped resource (MMIO region or memory carveout), recorded in all
 * three relevant address spaces: kernel virtual, physical, and device (DMA).
 */
struct gxp_mapped_resource {
	void __iomem *vaddr;		 /* starting virtual address */
	phys_addr_t paddr;		 /* starting physical address */
	dma_addr_t daddr;		 /* starting device address */
	resource_size_t size;		 /* size in bytes */
};

/* Structure to hold TPU device info */
/* Structure to hold TPU device info */
struct gxp_tpu_dev {
	struct device *dev;	/* the TPU device, obtained from elsewhere — NULL semantics not shown here */
	phys_addr_t mbx_paddr;	/* physical base address of the TPU mailbox region */
};

/* Forward declarations from submodules */
struct gxp_client;
struct gxp_mailbox_manager;
struct gxp_debug_dump_manager;
struct gxp_domain_pool;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
struct gxp_telemetry_manager;
struct gxp_thermal_manager;
struct gxp_wakelock_manager;
struct gxp_usage_stats;

/*
 * Top-level per-device state for the GXP driver. One instance exists per
 * probed platform device; most sub-modules hang their manager objects off
 * of this structure (see forward declarations above).
 */
struct gxp_dev {
	struct device *dev;		 /* platform bus device */
	struct miscdevice misc_dev;	 /* misc device structure */
	struct dentry *d_entry;		 /* debugfs dir for this device */
	struct gxp_mapped_resource regs; /* ioremapped CSRs */
	struct gxp_mapped_resource mbx[GXP_NUM_MAILBOXES]; /* mailbox CSRs */
	struct gxp_mapped_resource fwbufs[GXP_NUM_CORES]; /* FW carveout */
	struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
	struct gxp_mapped_resource cmu; /* CMU CSRs */
	/* Sub-module manager handles; types are forward-declared above. */
	struct gxp_mailbox_manager *mailbox_mgr;
	struct gxp_power_manager *power_mgr;
	struct gxp_debug_dump_manager *debug_dump_mgr;
	struct gxp_firmware_manager *firmware_mgr;
	/*
	 * Lock to ensure only one thread at a time is ever calling
	 * `pin_user_pages_fast()` during mapping, otherwise it will fail.
	 */
	struct mutex pin_user_pages_lock;
	/*
	 * Reader/writer lock protecting usage of virtual cores assigned to
	 * physical cores.
	 * A writer is any function creating or destroying a virtual core, or
	 * running or stopping one on a physical core.
	 * A reader is any function making use of or interacting with a virtual
	 * core without starting or stopping it on a physical core.
	 * The fields `core_to_vd[]` and `firmware_running` are also protected
	 * by this lock.
	 */
	struct rw_semaphore vd_semaphore;
	/* Per physical core: the virtual device currently assigned to it. */
	struct gxp_virtual_device *core_to_vd[GXP_NUM_CORES];
	/* Client created for debugfs-driven operations; protected by debugfs_client_lock. */
	struct gxp_client *debugfs_client;
	struct mutex debugfs_client_lock;
	/* Whether the debugfs client currently holds a wakelock — TODO confirm against debugfs handlers. */
	bool debugfs_wakelock_held;
	struct gxp_thermal_manager *thermal_mgr;
	struct gxp_dma_manager *dma_mgr;
	struct gxp_fw_data_manager *data_mgr;
	struct gxp_tpu_dev tpu_dev;	/* info about the paired TPU device, for TPU mailbox mapping */
	struct gxp_telemetry_manager *telemetry_mgr;
	struct gxp_wakelock_manager *wakelock_mgr;
	/*
	 * Pointer to GSA device for firmware authentication.
	 * May be NULL if the chip does not support firmware authentication
	 */
	struct device *gsa_dev;
	/* Bytes of memory per core — presumably parsed from the device tree; confirm in probe code. */
	u32 memory_per_core;
	/* Pool of IOMMU domains handed out to virtual devices. */
	struct gxp_domain_pool *domain_pool;
	/* All open clients of this device; protected by client_list_lock. */
	struct list_head client_list;
	struct mutex client_list_lock;
	/*
	 * Buffer shared across firmware.
	 * Its paddr is 0 if the shared buffer is not available.
	 * Its vaddr is always 0 as this region is not expected to be accessible
	 * to us.
	 */
	struct gxp_mapped_resource shared_buf;
	struct gxp_usage_stats *usage_stats; /* Stores the usage stats */

	/* callbacks for chip-dependent implementations */

	/*
	 * For parsing chip-dependent device tree attributes.
	 *
	 * Called as the first step in the common device probing procedure.
	 *
	 * Do NOT use non-device managed allocations in this function, to
	 * prevent memory leak when the probe procedure fails.
	 *
	 * Return a non-zero value can fail the probe procedure.
	 *
	 * This callback is optional.
	 */
	int (*parse_dt)(struct platform_device *pdev, struct gxp_dev *gxp);
	/*
	 * Called when common device probing procedure is done.
	 *
	 * Return a non-zero value can fail the probe procedure.
	 *
	 * This callback is optional.
	 */
	int (*after_probe)(struct gxp_dev *gxp);
	/*
	 * Called before common device removal procedure.
	 *
	 * This callback is optional.
	 */
	void (*before_remove)(struct gxp_dev *gxp);
	/*
	 * Device ioctl handler for chip-dependent ioctl calls.
	 * Should return -ENOTTY when the ioctl should be handled by common
	 * device ioctl handler.
	 *
	 * This callback is optional.
	 */
	long (*handle_ioctl)(struct file *file, uint cmd, ulong arg);
	/*
	 * Called when allocating a virtual device is done.
	 *
	 * Return a non-zero value can fail the vd allocation.
	 *
	 * This callback is optional.
	 */
	int (*after_allocate_vd)(struct gxp_dev *gxp,
				 struct gxp_virtual_device *vd);
	/*
	 * Called before releasing the virtual device.
	 *
	 * This callback is optional.
	 */
	void (*before_release_vd)(struct gxp_dev *gxp,
				  struct gxp_virtual_device *vd);
};

/* GXP device IO functions */

/* Read a 32-bit CSR at @reg_offset from the device's main register block. */
static inline u32 gxp_read_32(struct gxp_dev *gxp, uint reg_offset)
{
	void __iomem *addr = gxp->regs.vaddr + reg_offset;

	return readl(addr);
}

/* Write @value to the 32-bit CSR at @reg_offset in the main register block. */
static inline void gxp_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
{
	void __iomem *addr = gxp->regs.vaddr + reg_offset;

	writel(value, addr);
}

/*
 * Look up the reserved-memory region referenced by the device-tree property
 * @phandle on this device's node and fill @r with its address range.
 *
 * Returns 0 on success, -ENODEV if the phandle cannot be resolved, or the
 * error from of_address_to_resource().
 */
static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
					    struct resource *r, char *phandle)
{
	struct device_node *node;
	int err;

	node = of_parse_phandle(gxp->dev->of_node, phandle, 0);
	if (IS_ERR_OR_NULL(node)) {
		dev_err(gxp->dev, "Failed to find \"%s\" reserved memory\n",
			phandle);
		return -ENODEV;
	}

	err = of_address_to_resource(node, 0, r);
	/* Drop the reference taken by of_parse_phandle(). */
	of_node_put(node);

	return err;
}

/*
 * Returns whether the AP communicates with the DSP cores directly via the
 * core mailboxes. Every chip's platform driver must implement this.
 */
bool gxp_is_direct_mode(struct gxp_dev *gxp);

#endif /* __GXP_INTERNAL_H__ */