summaryrefslogtreecommitdiff
path: root/gxp.h
blob: ccfb614aceee276c2dc5d95a03857fd82f43a92b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * GXP kernel-userspace interface definitions.
 *
 * Copyright (C) 2020 Google LLC
 */
#ifndef __GXP_H__
#define __GXP_H__

#include <linux/ioctl.h>
#include <linux/types.h>

/*
 * mmap offsets for logging and tracing buffers
 * Requested size will be divided evenly among all cores. The whole buffer
 * must be page-aligned, and the size of each core's buffer must be a multiple
 * of PAGE_SIZE.
 */
#define GXP_MMAP_LOG_BUFFER_OFFSET	0x10000
#define GXP_MMAP_TRACE_BUFFER_OFFSET	0x20000

#define GXP_IOCTL_BASE 0xEE

/* GXP map flag macros */

/* The mask for specifying DMA direction in GXP map flag */
#define GXP_MAP_DIR_MASK		3
/* The targeted DMA direction for the buffer */
#define GXP_MAP_DMA_BIDIRECTIONAL	0
#define GXP_MAP_DMA_TO_DEVICE		1
#define GXP_MAP_DMA_FROM_DEVICE		2

/*
 * TODO(b/209083969) The following IOCTLs will no longer require the caller
 * to hold a virtual device wakelock to call them once virtual device
 * suspend/resume is implemented:
 * - GXP_MAP_BUFFER
 * - GXP_UNMAP_BUFFER
 * - GXP_SYNC_BUFFER
 * - GXP_MAP_DMABUF
 * - GXP_UNMAP_DMABUF
 */

/*
 * Argument for GXP_MAP_BUFFER and GXP_UNMAP_BUFFER.
 *
 * NOTE(review): the __u16 field followed by __u64 fields leaves implicit
 * compiler padding in this UAPI struct; layout is ABI/alignment dependent —
 * confirm 32-bit/64-bit userspace compatibility.
 */
struct gxp_map_ioctl {
	/*
	 * Bitfield indicating which virtual cores to map the buffer for.
	 * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
	 * (Being 16 bits wide, this supports at most 16 virtual cores.)
	 *
	 * This field is not used by the unmap IOCTL, which always unmaps a
	 * buffer for all cores it had been mapped for.
	 */
	__u16 virtual_core_list;
	__u64 host_address;	/* virtual address in the process space */
	__u32 size;		/* size of mapping in bytes */
	/*
	 * Flags indicating mapping attribute requests from the runtime.
	 * Set RESERVED bits to 0 to ensure backwards compatibility.
	 *
	 * Bitfields:
	 *   [1:0]   - DMA_DIRECTION:
	 *               00 = DMA_BIDIRECTIONAL (host/device can write buffer)
	 *               01 = DMA_TO_DEVICE     (host can write buffer)
	 *               10 = DMA_FROM_DEVICE   (device can write buffer)
	 *             Note: DMA_DIRECTION is the direction in which data moves
	 *             from the host's perspective.
	 *   [31:2]  - RESERVED
	 */
	__u32 flags;
	__u64 device_address;	/* returned device address */
};

/*
 * Map host buffer.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 */
#define GXP_MAP_BUFFER \
	_IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)

/*
 * Un-map host buffer previously mapped by GXP_MAP_BUFFER.
 *
 * Only the @device_address field will be used. Other fields will be fetched
 * from the kernel's internal records. It is recommended to use the argument
 * that was passed in GXP_MAP_BUFFER to un-map the buffer.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 */
#define GXP_UNMAP_BUFFER \
	_IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)

/* GXP sync flag macros */
#define GXP_SYNC_FOR_DEVICE		(0)
#define GXP_SYNC_FOR_CPU		(1)

/* Argument for GXP_SYNC_BUFFER. All fields are inputs. */
struct gxp_sync_ioctl {
	/*
	 * The starting address of the buffer to be synchronized. Must be a
	 * device address returned by GXP_MAP_BUFFER.
	 */
	__u64 device_address;
	/* size in bytes to be sync'ed */
	__u32 size;
	/*
	 * offset in bytes at which the sync operation is to begin from the
	 * start of the buffer
	 */
	__u32 offset;
	/*
	 * Flags indicating sync operation requested from the runtime.
	 * Set RESERVED bits to 0 to ensure backwards compatibility.
	 *
	 * Bitfields:
	 *   [0:0]   - Sync direction. Sync for device or CPU.
	 *               0 = sync for device (GXP_SYNC_FOR_DEVICE)
	 *               1 = sync for CPU    (GXP_SYNC_FOR_CPU)
	 *   [31:1]  - RESERVED
	 */
	__u32 flags;
};

/*
 * Sync buffer previously mapped by GXP_MAP_BUFFER.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 *
 * EINVAL: If a mapping for @device_address is not found.
 * EINVAL: If @size equals 0.
 * EINVAL: If @offset plus @size exceeds the mapping size.
 */
#define GXP_SYNC_BUFFER \
	_IOW(GXP_IOCTL_BASE, 2, struct gxp_sync_ioctl)

/* Argument for GXP_MAILBOX_COMMAND. */
struct gxp_mailbox_command_ioctl {
	/*
	 * Input:
	 * The virtual core to dispatch the command to.
	 */
	__u16 virtual_core_id;
	/*
	 * Output:
	 * The sequence number assigned to this command. The caller can use
	 * this value to match responses fetched via `GXP_MAILBOX_RESPONSE`
	 * with this command.
	 */
	__u64 sequence_number;
	/*
	 * Input:
	 * Device address to the buffer containing a GXP command. The user
	 * should have obtained this address from the GXP_MAP_BUFFER ioctl.
	 */
	__u64 device_address;
	/*
	 * Input:
	 * Size of the buffer at `device_address` in bytes.
	 */
	__u32 size;
	/*
	 * Input:
	 * Flags describing the command, for use by the GXP device.
	 * (Opaque to the driver; interpreted by device firmware.)
	 */
	__u32 flags;
};

/*
 * Push element to the mailbox command queue.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 */
#define GXP_MAILBOX_COMMAND \
	_IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_ioctl)

/* GXP mailbox response error code values */
#define GXP_RESPONSE_ERROR_NONE         (0)
#define GXP_RESPONSE_ERROR_INTERNAL     (1)
#define GXP_RESPONSE_ERROR_TIMEOUT      (2)

/* Argument for GXP_MAILBOX_RESPONSE. */
struct gxp_mailbox_response_ioctl {
	/*
	 * Input:
	 * The virtual core to fetch a response from.
	 */
	__u16 virtual_core_id;
	/*
	 * Output:
	 * Sequence number indicating which command this response is for.
	 * Matches the `sequence_number` returned by GXP_MAILBOX_COMMAND.
	 */
	__u64 sequence_number;
	/*
	 * Output:
	 * Driver error code.
	 * Indicates if the response was obtained successfully,
	 * `GXP_RESPONSE_ERROR_NONE`, or what error prevented the command
	 * from completing successfully (one of the GXP_RESPONSE_ERROR_*
	 * values above).
	 */
	__u16 error_code;
	/*
	 * Output:
	 * Value returned by firmware in response to a command.
	 * Only valid if `error_code` == GXP_RESPONSE_ERROR_NONE
	 */
	__u32 cmd_retval;
};

/*
 * Pop element from the mailbox response queue. Blocks until mailbox response
 * is available.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 */
#define GXP_MAILBOX_RESPONSE \
	_IOWR(GXP_IOCTL_BASE, 4, struct gxp_mailbox_response_ioctl)

/*
 * Argument for GXP_GET_SPECS. All fields are outputs.
 *
 * NOTE(review): the version_* fields presumably report the driver or
 * interface version — confirm against the driver implementation.
 */
struct gxp_specs_ioctl {
	__u8 core_count;		/* number of cores available */
	__u16 version_major;
	__u16 version_minor;
	__u16 version_build;
	__u8 threads_per_core;
	__u32 memory_per_core;		/* measured in kB */
};

/* Query system specs. */
#define GXP_GET_SPECS \
	_IOR(GXP_IOCTL_BASE, 5, struct gxp_specs_ioctl)

/* Argument for GXP_ALLOCATE_VIRTUAL_DEVICE. */
struct gxp_virtual_device_ioctl {
	/*
	 * Input:
	 * The number of cores requested for the virtual device.
	 */
	__u8 core_count;
	/*
	 * Input:
	 * The number of threads requested per core.
	 */
	__u16 threads_per_core;
	/*
	 * Input:
	 * The amount of memory requested per core, in kB.
	 */
	__u32 memory_per_core;
	/*
	 * Output:
	 * The ID assigned to the virtual device and shared with its cores.
	 */
	__u32 vdid;
};

/* Allocate virtual device. */
#define GXP_ALLOCATE_VIRTUAL_DEVICE \
	_IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)

#define ETM_TRACE_LSB_MASK 0x1
#define ETM_TRACE_SYNC_MSG_PERIOD_MIN 8
#define ETM_TRACE_SYNC_MSG_PERIOD_MAX 256
#define ETM_TRACE_PC_MATCH_MASK_LEN_MAX 31

/*
 * For all *_enable and pc_match_sense fields, only the least significant bit is
 * considered. All other bits are ignored.
 */
/* Argument for GXP_ETM_TRACE_START_COMMAND. All fields are inputs. */
struct gxp_etm_trace_start_ioctl {
	__u16 virtual_core_id;
	__u8 trace_ram_enable; /* Enables local trace memory. */
	/* When set, trace output is sent out on the ATB interface. */
	__u8 atb_enable;
	/* Enables embedding timestamp information in trace messages. */
	__u8 timestamp_enable;
	/*
	 * Determines the rate at which synchronization messages are
	 * automatically emitted in the output trace.
	 * Valid values: 0, 8, 16, 32, 64, 128, 256
	 * (i.e. 0 or a power of two between ETM_TRACE_SYNC_MSG_PERIOD_MIN
	 * and ETM_TRACE_SYNC_MSG_PERIOD_MAX)
	 * Eg. A value of 16 means 1 synchronization message will be emitted
	 * every 16 messages.
	 * A value of 0 means no synchronization messages will be emitted.
	 */
	__u16 sync_msg_period;
	__u8 pc_match_enable; /* PC match causes Stop trigger. */
	/*
	 * 32-bit address to compare to processor PC when pc_match_enable = 1.
	 * A match for a given executed instruction triggers trace stop.
	 * Note: trigger_pc is ignored when pc_match_enable = 0.
	 */
	__u32 trigger_pc;
	/*
	 * Indicates how many of the lower bits of trigger_pc to ignore.
	 * Valid values: 0 to ETM_TRACE_PC_MATCH_MASK_LEN_MAX (31)
	 * Note: pc_match_mask_length is ignored when pc_match_enable = 0.
	 */
	__u8 pc_match_mask_length;
	/* When 0, match when the processor's PC is in-range of trigger_pc and
	 * mask. When 1, match when the processor's PC is out-of-range of
	 * trigger_pc and mask.
	 * Note: pc_match_sense is ignored when pc_match_enable = 0.
	 */
	__u8 pc_match_sense;
};

/* Configure ETM trace registers and start ETM tracing. */
#define GXP_ETM_TRACE_START_COMMAND \
	_IOW(GXP_IOCTL_BASE, 7, struct gxp_etm_trace_start_ioctl)

/*
 * Halts trace generation via a software trigger. The virtual core id is passed
 * in as an input.
 */
#define GXP_ETM_TRACE_SW_STOP_COMMAND \
	_IOW(GXP_IOCTL_BASE, 8, __u16)

/*
 * Users should call this IOCTL after tracing has been stopped for the last
 * trace session of the core. Otherwise, there is a risk of having up to 3 bytes
 * of trace data missing towards the end of the trace session.
 * This is a workaround for b/180728272 and b/181623511.
 * The virtual core id is passed in as an input.
 */
#define GXP_ETM_TRACE_CLEANUP_COMMAND \
	_IOW(GXP_IOCTL_BASE, 9, __u16)

#define GXP_TRACE_HEADER_SIZE 256
#define GXP_TRACE_RAM_SIZE 4096
/* Argument for GXP_ETM_GET_TRACE_INFO_COMMAND. */
struct gxp_etm_get_trace_info_ioctl {
	/*
	 * Input:
	 * The virtual core to fetch trace information from.
	 */
	__u16 virtual_core_id;
	/*
	 * Input:
	 * The type of data to retrieve.
	 * 0: Trace Header only
	 * 1: Trace Header + Trace Data in Trace RAM
	 */
	__u8 type;
	/*
	 * Input:
	 * Trace header user space address to contain trace header information
	 * that is used for decoding the trace.
	 * (The buffer should be at least GXP_TRACE_HEADER_SIZE bytes.)
	 */
	__u64 trace_header_addr;
	/*
	 * Input:
	 * Trace data user space address to contain Trace RAM data.
	 * Note: the buffer at trace_data_addr is left empty if type == 0.
	 * (The buffer should be at least GXP_TRACE_RAM_SIZE bytes when
	 * type == 1 — confirm against the driver implementation.)
	 */
	__u64 trace_data_addr;
};

/* Retrieves trace header and/or trace data for decoding purposes. */
#define GXP_ETM_GET_TRACE_INFO_COMMAND \
	_IOWR(GXP_IOCTL_BASE, 10, struct gxp_etm_get_trace_info_ioctl)

#define GXP_TELEMETRY_TYPE_LOGGING	(0)
#define GXP_TELEMETRY_TYPE_TRACING	(1)

/*
 * Enable either logging or software tracing for all cores.
 * Accepts either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
 * to specify whether logging or software tracing is to be enabled.
 *
 * Buffers for logging or tracing must have already been mapped via an `mmap()`
 * call with the respective offset and initialized by the client, prior to
 * calling this ioctl.
 *
 * If firmware is already running on any cores, they will be signaled to begin
 * logging/tracing to their buffers. Any cores booting after this call will
 * begin logging/tracing as soon as their firmware is able to.
 */
#define GXP_ENABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)

/*
 * Disable either logging or software tracing for all cores.
 * Accepts either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
 * to specify whether logging or software tracing is to be disabled.
 *
 * This call will block until any running cores have been notified and ACKed
 * that they have disabled the specified telemetry type.
 */
#define GXP_DISABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)

/* Argument for GXP_MAP_TPU_MBX_QUEUE and GXP_UNMAP_TPU_MBX_QUEUE. */
struct gxp_tpu_mbx_queue_ioctl {
	__u32 tpu_fd; /* TPU virtual device group fd */
	/*
	 * Bitfield indicating which virtual cores to allocate and map the
	 * buffers for.
	 * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
	 *
	 * This field is not used by the unmap IOCTL, which always unmaps the
	 * buffers for all cores it had been mapped for.
	 */
	__u32 virtual_core_list;
	/*
	 * The user address of an edgetpu_mailbox_attr struct, containing
	 * cmd/rsp queue size, mailbox priority and other relevant info.
	 * This structure is defined in edgetpu.h in the TPU driver.
	 */
	__u64 attr_ptr;
};

/*
 * Map TPU-DSP mailbox cmd/rsp queue buffers.
 */
#define GXP_MAP_TPU_MBX_QUEUE \
	_IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)

/*
 * Un-map TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
 * GXP_MAP_TPU_MBX_QUEUE.
 *
 * Only the @tpu_fd field will be used. Other fields will be fetched
 * from the kernel's internal records. It is recommended to use the argument
 * that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
 */
#define GXP_UNMAP_TPU_MBX_QUEUE \
	_IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)

/*
 * Argument for GXP_REGISTER_TELEMETRY_EVENTFD and
 * GXP_UNREGISTER_TELEMETRY_EVENTFD.
 */
struct gxp_register_telemetry_eventfd_ioctl {
	/*
	 * File-descriptor obtained via eventfd().
	 *
	 * Not used during the unregister step; the driver will unregister
	 * whichever eventfd it has currently registered for @type, if any.
	 */
	__u32 eventfd;
	/*
	 * Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
	 * The driver will signal @eventfd whenever any core signals a
	 * telemetry state change while this type of telemetry is active.
	 */
	__u8 type;
};

#define GXP_REGISTER_TELEMETRY_EVENTFD                                         \
	_IOW(GXP_IOCTL_BASE, 15, struct gxp_register_telemetry_eventfd_ioctl)

#define GXP_UNREGISTER_TELEMETRY_EVENTFD                                       \
	_IOW(GXP_IOCTL_BASE, 16, struct gxp_register_telemetry_eventfd_ioctl)

/*
 * Reads the 2 global counter registers in AURORA_TOP and combines them to
 * return the full 64-bit value of the counter.
 */
#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)

/*
 * Components for which a client may hold a wakelock.
 * Acquired by passing these values as `components_to_wake` in
 * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
 * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
 *
 * Multiple wakelocks can be acquired or released at once by passing multiple
 * components, ORed together.
 */
#define WAKELOCK_BLOCK		(1 << 0)
#define WAKELOCK_VIRTUAL_DEVICE	(1 << 1)

/*
 * DSP subsystem Power state values for use as `gxp_power_state` in
 * `struct gxp_acquire_wakelock_ioctl`
 */
#define GXP_POWER_STATE_OFF	0
#define GXP_POWER_STATE_UUD	1
#define GXP_POWER_STATE_SUD	2
#define GXP_POWER_STATE_UD	3
#define GXP_POWER_STATE_NOM	4

/*
 * Memory interface power state values for use as `memory_power_state` in
 * `struct gxp_acquire_wakelock_ioctl`.
 */
#define MEMORY_POWER_STATE_UNDEFINED	0
#define MEMORY_POWER_STATE_MIN		1
#define MEMORY_POWER_STATE_VERY_LOW	2
#define MEMORY_POWER_STATE_LOW		3
#define MEMORY_POWER_STATE_HIGH		4
#define MEMORY_POWER_STATE_VERY_HIGH	5
#define MEMORY_POWER_STATE_MAX		6

/* Argument for GXP_ACQUIRE_WAKE_LOCK. All fields are inputs. */
struct gxp_acquire_wakelock_ioctl {
	/*
	 * The components for which a wakelock will be acquired.
	 * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
	 * bitwise OR of both.
	 *
	 * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
	 * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
	 * client must already have acquired a BLOCK wakelock or acquire both
	 * in the same call.
	 */
	__u32 components_to_wake;
	/*
	 * Minimum power state to operate the entire DSP subsystem at until
	 * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
	 * from above. Note that the requested power state will not be cleared
	 * if only the VIRTUAL_DEVICE wakelock is released.
	 *
	 * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
	 * wakelock.
	 */
	__u32 gxp_power_state;
	/*
	 * Memory interface power state to request from the system so long as
	 * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
	 * from above. The requested memory power state will not be cleared if
	 * only the VIRTUAL_DEVICE wakelock is released.
	 *
	 * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
	 * the memory interface power state will be made.
	 */
	__u32 memory_power_state;
	/*
	 * How long to wait, in microseconds, before returning if insufficient
	 * physical cores are available when attempting to acquire a
	 * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
	 * should not wait at all if cores are not available.
	 */
	__u32 vd_timeout_us;
};

/*
 * Acquire a wakelock and request minimum power states for the DSP subsystem
 * and the memory interface.
 *
 * Upon a successful return, the specified components will be powered on and if
 * they were not already running at the specified or higher power states,
 * requests will have been sent to transition both the DSP subsystem and
 * memory interface to the specified states.
 *
 * If the same client invokes this IOCTL for the same component more than once
 * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
 * second call will update requested power states, but have no other effects.
 * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
 *
 * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
 * insufficient physical cores available, the driver will wait up to
 * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
 * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
 * wakelocks were being requested, neither will have been acquired.
 */
#define GXP_ACQUIRE_WAKE_LOCK                                                  \
	_IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_ioctl)

/*
 * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
 *
 * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
 * bitwise OR of both.
 *
 * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
 * removed from physical cores. At that point the cores may be reallocated to
 * another client or powered down.
 *
 * If no clients hold a BLOCK wakelock, the entire DSP subsystem may be powered
 * down. If a client attempts to release a BLOCK wakelock while still holding
 * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
 *
 * If a client attempts to release a wakelock it does not hold, this IOCTL will
 * return -ENODEV.
 */
#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)

/* Argument for GXP_MAP_DMABUF and GXP_UNMAP_DMABUF. */
struct gxp_map_dmabuf_ioctl {
	/*
	 * Bitfield indicating which virtual cores to map the dma-buf for.
	 * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
	 * (Being 16 bits wide, this supports at most 16 virtual cores.)
	 *
	 * This field is not used by the unmap dma-buf IOCTL, which always
	 * unmaps a dma-buf for all cores it had been mapped for.
	 */
	__u16 virtual_core_list;
	__s32 dmabuf_fd;	/* File descriptor of the dma-buf to map. */
	/*
	 * Flags indicating mapping attribute requests from the runtime.
	 * Set RESERVED bits to 0 to ensure backwards compatibility.
	 *
	 * Bitfields:
	 *   [1:0]   - DMA_DIRECTION:
	 *               00 = DMA_BIDIRECTIONAL (host/device can write buffer)
	 *               01 = DMA_TO_DEVICE     (host can write buffer)
	 *               10 = DMA_FROM_DEVICE   (device can write buffer)
	 *             Note: DMA_DIRECTION is the direction in which data moves
	 *             from the host's perspective.
	 *   [31:2]  - RESERVED
	 */
	__u32 flags;
	/*
	 * Device address the dmabuf is mapped to.
	 * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
	 *   can be accessed from by the device.
	 * - GXP_UNMAP_DMABUF expects this field to contain the value from the
	 *   mapping call, and uses it to determine which dma-buf to unmap.
	 */
	__u64 device_address;
};

/*
 * Map host buffer via its dma-buf FD.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 */
#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)

/*
 * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
 *
 * Only the @device_address field is used. Other fields are fetched from the
 * kernel's internal records. It is recommended to use the argument that was
 * passed in GXP_MAP_DMABUF to un-map the dma-buf.
 *
 * The client must hold a VIRTUAL_DEVICE wakelock.
 */
#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)

#endif /* __GXP_H__ */