summaryrefslogtreecommitdiff
path: root/gxp-internal.h
blob: 0034800a207c890f84e2dc008187962353c6524b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * GXP driver common internal definitions.
 *
 * Copyright (C) 2021 Google LLC
 */
#ifndef __GXP_INTERNAL_H__
#define __GXP_INTERNAL_H__

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>

#include "gxp-config.h"
#include "gxp-tmp.h"

/*
 * Holds Client's TPU mailboxes info used during mapping.
 *
 * Passed around while mapping a client's TPU mailbox queues; see
 * tpu_mbx_allocated in struct gxp_client below.
 */
struct gxp_tpu_mbx_desc {
	/*
	 * Cores involved in the mapping. NOTE(review): presumably a bitmask
	 * of physical core IDs, by analogy with firmware_running in
	 * struct gxp_dev — confirm against the mapping code.
	 */
	uint phys_core_list;
	/* Sizes in bytes of the TPU command and response queues. */
	size_t cmdq_size, respq_size;
};

/* Holds state belonging to a client (one open file handle, presumably). */
struct gxp_client {
	struct gxp_dev *gxp;	/* the device this client belongs to */
	/*
	 * Opaque per-client handle. NOTE(review): owner/semantics not
	 * visible in this header — likely the firmware-data "application"
	 * object managed by the fw_data_manager; confirm in gxp-fw-data.
	 */
	void *app;
	bool vd_allocated;	/* a virtual device is allocated to this client */
	bool tpu_mbx_allocated;	/* TPU mailboxes are mapped for this client */
	struct gxp_tpu_mbx_desc mbx_desc;	/* TPU mailbox mapping info */
};

/*
 * An ioremapped MMIO region or memory carveout, described by all three
 * address spaces (CPU virtual, CPU physical, DSP/IOMMU device address)
 * plus its size.
 */
struct gxp_mapped_resource {
	void __iomem *vaddr;		 /* starting virtual address */
	phys_addr_t paddr;		 /* starting physical address */
	dma_addr_t daddr;		 /* starting device address */
	resource_size_t size;		 /* size in bytes */
};

/*
 * Node type for the per-core mailbox_resp_queues lists in struct gxp_dev;
 * each node carries one mailbox response.
 */
struct mailbox_resp_list {
	struct list_head list;		/* links into a mailbox_resp_queues[] list */
	struct gxp_response *resp;	/* the queued response */
};

/* Structure to hold TPU device info (for GXP<->TPU mailbox mapping). */
struct gxp_tpu_dev {
	struct device *dev;		/* the TPU's device; not owned here */
	phys_addr_t mbx_paddr;		/* physical address of the TPU mailbox CSRs */
};

/*
 * Forward declarations from submodules.
 *
 * struct gxp_dev below only holds pointers to these managers, so their
 * full definitions stay in the respective submodule headers and this
 * header avoids pulling them all in.
 */
struct gxp_mailbox_manager;
struct gxp_debug_dump_manager;
struct gxp_mapping_root;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
struct gxp_power_manager;
struct gxp_telemetry_manager;

/*
 * Top-level per-device state for the GXP driver: mapped MMIO/carveout
 * regions, the manager objects owned by each submodule, and per-core
 * mailbox response bookkeeping.
 */
struct gxp_dev {
	struct device *dev;		 /* platform bus device */
	struct miscdevice misc_dev;	 /* misc device structure */
	struct dentry *d_entry;		 /* debugfs dir for this device */
	struct gxp_mapped_resource regs; /* ioremapped CSRs */
	struct gxp_mapped_resource mbx[GXP_NUM_CORES]; /* mailbox CSRs */
	struct gxp_mapped_resource fwbufs[GXP_NUM_CORES]; /* FW carveout */
	struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
	struct gxp_mapped_resource coredumpbuf; /* core dump carveout */
	struct gxp_mailbox_manager *mailbox_mgr;	/* owned by gxp-mailbox */
	struct gxp_power_manager *power_mgr;		/* owned by gxp-pm */
	/*
	 * TODO(b/182416287): This should be a rb_tree of lists keyed by
	 * virtual device. For now, keep an array of one list per physical core
	 */
	struct list_head mailbox_resp_queues[GXP_NUM_CORES];
	/* Waiters for responses arriving on the matching queue above. */
	wait_queue_head_t mailbox_resp_waitqs[GXP_NUM_CORES];
	/* Protects mailbox_resp_queues[]; spinlock since waiters may be woken
	 * from IRQ context — TODO confirm against gxp-mailbox.
	 */
	spinlock_t mailbox_resps_lock;
	struct gxp_debug_dump_manager *debug_dump_mgr;
	struct gxp_mapping_root *mappings;	/* tree of user mappings */
	u32 firmware_running;		 /* firmware status bitmap */
	struct mutex vd_lock;		 /* synchronizes vd operations */
	/* Which client (if any) currently owns each physical core. */
	struct gxp_client *core_to_client[GXP_NUM_CORES];
	/* Client used when driving cores from debugfs instead of userspace. */
	struct gxp_client *debugfs_client;
	struct gxp_dma_manager *dma_mgr;		/* owned by gxp-dma */
	struct gxp_fw_data_manager *data_mgr;		/* owned by gxp-fw-data */
	struct gxp_tpu_dev tpu_dev;	/* info about the paired TPU device */
	struct gxp_telemetry_manager *telemetry_mgr;	/* owned by gxp-telemetry */
};

/* GXP device IO functions */

/*
 * Read a 32-bit register from the device's top-level CSR block.
 * @reg_offset is a byte offset into the ioremapped "regs" region.
 */
static inline u32 gxp_read_32(struct gxp_dev *gxp, uint reg_offset)
{
	void __iomem *addr = gxp->regs.vaddr + reg_offset;

	return readl(addr);
}

/*
 * Write a 32-bit register in the device's top-level CSR block.
 * @reg_offset is a byte offset into the ioremapped "regs" region.
 */
static inline void gxp_write_32(struct gxp_dev *gxp, uint reg_offset, u32 value)
{
	void __iomem *addr = gxp->regs.vaddr + reg_offset;

	writel(value, addr);
}

/*
 * Read a 32-bit register belonging to one DSP core. Core register banks
 * start at GXP_CORE_0_BASE and are GXP_CORE_SIZE bytes apart;
 * @reg_offset is relative to the start of @core's bank.
 */
static inline u32 gxp_read_32_core(struct gxp_dev *gxp, uint core,
				   uint reg_offset)
{
	uint core_base = GXP_CORE_0_BASE + core * GXP_CORE_SIZE;

	return gxp_read_32(gxp, core_base + reg_offset);
}

/*
 * Write a 32-bit register belonging to one DSP core. Core register banks
 * start at GXP_CORE_0_BASE and are GXP_CORE_SIZE bytes apart;
 * @reg_offset is relative to the start of @core's bank.
 */
static inline void gxp_write_32_core(struct gxp_dev *gxp, uint core,
				     uint reg_offset, u32 value)
{
	uint core_base = GXP_CORE_0_BASE + core * GXP_CORE_SIZE;

	gxp_write_32(gxp, core_base + reg_offset, value);
}

/*
 * Acquire hardware sync barrier @index, sleeping in a poll loop until it
 * is obtained. A read returning SYNC_BARRIER_FREE_VALUE indicates the
 * barrier was acquired (NOTE(review): reads of the barrier register are
 * assumed to be the acquire operation — confirm against the HW spec).
 * Out-of-range @index logs an error and returns without acquiring.
 *
 * Fix: use %u for the unsigned @index (was %d, a -Wformat mismatch;
 * matches the sibling gxp_read_sync_barrier_shadow()).
 */
static inline void gxp_acquire_sync_barrier(struct gxp_dev *gxp, uint index)
{
	uint barrier_reg_offset;

	if (index >= SYNC_BARRIER_COUNT) {
		dev_err(gxp->dev,
			"Attempt to acquire non-existent sync barrier: %u\n",
			index);
		return;
	}

	barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index);
	while (gxp_read_32(gxp, barrier_reg_offset) !=
	       SYNC_BARRIER_FREE_VALUE) {
		/*
		 * Sleep for the minimum amount.
		 * msleep(1~20) may not do what the caller intends, and will
		 * often sleep longer (~20 ms actual sleep for any value given
		 * in the 1~20ms range).
		 */
		msleep(20);
	}
}

/*
 * Release hardware sync barrier @index by writing 1 to its register.
 * Out-of-range @index logs an error and is a no-op.
 *
 * Fixes: the error message said "acquire" (copy-paste from
 * gxp_acquire_sync_barrier()) in the release path, and used %d for the
 * unsigned @index (-Wformat mismatch).
 */
static inline void gxp_release_sync_barrier(struct gxp_dev *gxp, uint index)
{
	uint barrier_reg_offset;

	if (index >= SYNC_BARRIER_COUNT) {
		dev_err(gxp->dev,
			"Attempt to release non-existent sync barrier: %u\n",
			index);
		return;
	}

	barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index);
	gxp_write_32(gxp, barrier_reg_offset, 1);
}

/*
 * Return the shadow value of sync barrier @index, or 0 with an error
 * logged when @index is out of range. NOTE(review): the SHADOW offset is
 * presumably a side-effect-free snapshot of the barrier state, unlike
 * the main barrier register — confirm against the HW spec.
 */
static inline u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
{
	uint shadow_offset;

	if (index >= SYNC_BARRIER_COUNT) {
		dev_err(gxp->dev,
			"Attempt to read non-existent sync barrier: %0u\n",
			index);
		return 0;
	}

	shadow_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index) +
			SYNC_BARRIER_SHADOW_OFFSET;

	return gxp_read_32(gxp, shadow_offset);
}

/*
 * Resolve the reserved-memory region referenced by device-tree property
 * @phandle on this device's node and fill @r with its address range.
 *
 * Returns 0 on success, -ENODEV if the phandle cannot be resolved, or
 * the error code from of_address_to_resource().
 *
 * Fixes: of_parse_phandle() returns NULL on failure (never an ERR_PTR),
 * so check for NULL directly instead of the misleading IS_ERR_OR_NULL();
 * const-qualify @phandle since it is only read (backward-compatible).
 */
static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
					    struct resource *r,
					    const char *phandle)
{
	int ret;
	struct device_node *np;

	np = of_parse_phandle(gxp->dev->of_node, phandle, 0);
	if (!np) {
		dev_err(gxp->dev, "Failed to find \"%s\" reserved memory\n",
			phandle);
		return -ENODEV;
	}

	ret = of_address_to_resource(np, 0, r);
	of_node_put(np);	/* drop the reference taken by of_parse_phandle() */

	return ret;
}

#endif /* __GXP_INTERNAL_H__ */