author     Roman Kiryanov <rkir@google.com>    2020-02-05 09:55:57 -0800
committer  Roman Kiryanov <rkir@google.com>    2020-02-05 16:35:36 -0800
commit     fc395e29714433774fcb63b611d23201cf650034 (patch)
tree       2517adc60fb76f7d69da7b9061bd16c389ab73a9
parent     5ac83f3bfbbc80ef7bc4dbaa4535c64bbbbf31cf (diff)
download   goldfish-modules-fc395e29714433774fcb63b611d23201cf650034.tar.gz
Add goldfish modules for r-goldfish-android-5.4
Moved from android/kernel/goldfish/android-goldfish-4.14-dev

How to build:

repo init -u https://android.googlesource.com/kernel/manifest \
    -b r-goldfish-android-5.4
repo sync
BUILD_CONFIG=goldfish-modules/build.config.goldfish.x86_64 \
    build/build.sh
ls -1 ./out/android-5.4/dist/
bzImage
goldfish_address_space.ko
goldfish_battery.ko
goldfish_cpufreq_stats.ko
goldfish_pipe.ko
goldfish_rotary.ko
goldfish_sync.ko
System.map
vmlinux

Bug: 147493341
Test: build
Signed-off-by: Roman Kiryanov <rkir@google.com>
Change-Id: I93cc8ada709275140290d9441614ba58b5a3c555
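
A quick way to sanity-check the resulting modules (a suggested example, assuming the dist path above and a build host with the standard modinfo tool):

modinfo ./out/android-5.4/dist/goldfish_address_space.ko
# prints the module's embedded description, author, license and vermagic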
-rw-r--r--   Kbuild                            8
-rw-r--r--   Makefile                          8
-rw-r--r--   build.config.goldfish             2
-rw-r--r--   build.config.goldfish.aarch64     2
-rw-r--r--   build.config.goldfish.x86_64      2
-rw-r--r--   goldfish_address_space.c        930
-rw-r--r--   goldfish_battery.c              300
-rw-r--r--   goldfish_cpufreq_stats.c         83
-rw-r--r--   goldfish_pipe.h                  36
-rw-r--r--   goldfish_pipe_base.c            144
-rw-r--r--   goldfish_pipe_qemu.h            141
-rw-r--r--   goldfish_pipe_v1.c              632
-rw-r--r--   goldfish_pipe_v2.c             1213
-rw-r--r--   goldfish_rotary.c               205
-rw-r--r--   goldfish_sync.c                 833
-rw-r--r--   uapi/goldfish_address_space.h    53
-rw-r--r--   uapi/goldfish_dma.h              84
-rw-r--r--   uapi/goldfish_sync.h             28
18 files changed, 4704 insertions, 0 deletions
diff --git a/Kbuild b/Kbuild
new file mode 100644
index 0000000..3809c95
--- /dev/null
+++ b/Kbuild
@@ -0,0 +1,8 @@
+goldfish_pipe-objs = goldfish_pipe_base.o goldfish_pipe_v1.o goldfish_pipe_v2.o
+
+obj-m += goldfish_pipe.o
+obj-m += goldfish_sync.o
+obj-m += goldfish_address_space.o
+obj-m += goldfish_battery.o
+obj-m += goldfish_cpufreq_stats.o
+obj-m += goldfish_rotary.o
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..231822e
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,8 @@
+all:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+ $(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+
+clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) clean
diff --git a/build.config.goldfish b/build.config.goldfish
new file mode 100644
index 0000000..77aff12
--- /dev/null
+++ b/build.config.goldfish
@@ -0,0 +1,2 @@
+BUILD_INITRAMFS=1
+EXT_MODULES="goldfish-modules"
diff --git a/build.config.goldfish.aarch64 b/build.config.goldfish.aarch64
new file mode 100644
index 0000000..534643b
--- /dev/null
+++ b/build.config.goldfish.aarch64
@@ -0,0 +1,2 @@
+. ${ROOT_DIR}/common/build.config.gki.aarch64
+. ${ROOT_DIR}/goldfish-modules/build.config.goldfish
diff --git a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64
new file mode 100644
index 0000000..91792de
--- /dev/null
+++ b/build.config.goldfish.x86_64
@@ -0,0 +1,2 @@
+. ${ROOT_DIR}/common/build.config.gki.x86_64
+. ${ROOT_DIR}/goldfish-modules/build.config.goldfish
diff --git a/goldfish_address_space.c b/goldfish_address_space.c
new file mode 100644
index 0000000..982960e
--- /dev/null
+++ b/goldfish_address_space.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <linux/device.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+
+#include "uapi/goldfish_address_space.h"
+
+MODULE_DESCRIPTION("A Goldfish driver that allocates address space ranges in "
+ "the guest to populate them later in the host. This allows "
+ "sharing host's memory with the guest.");
+MODULE_AUTHOR("Roman Kiryanov <rkir@google.com>");
+MODULE_LICENSE("GPL v2");
+
+#define AS_DEBUG 0
+
+#if AS_DEBUG
+ #define AS_DPRINT(fmt, ...) \
+ printk(KERN_ERR "%s:%d " fmt "\n", \
+ __func__, __LINE__, ##__VA_ARGS__);
+#else
+ #define AS_DPRINT(fmt, ...)
+#endif
+
+enum as_register_id {
+ AS_REGISTER_COMMAND = 0,
+ AS_REGISTER_STATUS = 4,
+ AS_REGISTER_GUEST_PAGE_SIZE = 8,
+ AS_REGISTER_BLOCK_SIZE_LOW = 12,
+ AS_REGISTER_BLOCK_SIZE_HIGH = 16,
+ AS_REGISTER_BLOCK_OFFSET_LOW = 20,
+ AS_REGISTER_BLOCK_OFFSET_HIGH = 24,
+ AS_REGISTER_PING = 28,
+ AS_REGISTER_PING_INFO_ADDR_LOW = 32,
+ AS_REGISTER_PING_INFO_ADDR_HIGH = 36,
+ AS_REGISTER_HANDLE = 40,
+ AS_REGISTER_PHYS_START_LOW = 44,
+ AS_REGISTER_PHYS_START_HIGH = 48,
+};
+
+enum as_command_id {
+ AS_COMMAND_ALLOCATE_BLOCK = 1,
+ AS_COMMAND_DEALLOCATE_BLOCK = 2,
+ AS_COMMAND_GEN_HANDLE = 3,
+ AS_COMMAND_DESTROY_HANDLE = 4,
+ AS_COMMAND_TELL_PING_INFO_ADDR = 5,
+};
+
+#define AS_PCI_VENDOR_ID 0x607D
+#define AS_PCI_DEVICE_ID 0xF153
+#define AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY 32
+#define AS_INVALID_HANDLE (~(0))
+
+enum as_pci_bar_id {
+ AS_PCI_CONTROL_BAR_ID = 0,
+ AS_PCI_AREA_BAR_ID = 1,
+};
+
+struct as_device_state {
+ struct miscdevice miscdevice;
+ struct pci_dev *dev;
+ struct as_driver_state *driver_state;
+
+ void __iomem *io_registers;
+
+ void *address_area; /* to claim the address space */
+
+ /* physical address to allocate from */
+ unsigned long address_area_phys_address;
+
+ struct mutex registers_lock; /* protects registers */
+};
+
+struct as_block {
+ u64 offset;
+ u64 size;
+};
+
+struct as_allocated_blocks {
+ struct as_block *blocks; /* a dynamic array of allocated blocks */
+ int blocks_size;
+ int blocks_capacity;
+ struct mutex blocks_lock; /* protects operations with blocks */
+};
+
+struct as_file_state {
+ struct as_device_state *device_state;
+ struct as_allocated_blocks allocated_blocks;
+ struct as_allocated_blocks shared_allocated_blocks;
+ struct goldfish_address_space_ping *ping_info;
+ struct mutex ping_info_lock; /* protects ping_info */
+ u32 handle; /* handle generated by the host */
+};
+
+static void __iomem *as_register_address(void __iomem *base,
+ int offset)
+{
+ WARN_ON(!base);
+
+ return ((char __iomem *)base) + offset;
+}
+
+static void as_write_register(void __iomem *registers,
+ int offset,
+ u32 value)
+{
+ writel(value, as_register_address(registers, offset));
+}
+
+static u32 as_read_register(void __iomem *registers, int offset)
+{
+ return readl(as_register_address(registers, offset));
+}
+
+static int as_run_command(struct as_device_state *state, enum as_command_id cmd)
+{
+ WARN_ON(!state);
+
+ as_write_register(state->io_registers, AS_REGISTER_COMMAND, cmd);
+ return -as_read_register(state->io_registers, AS_REGISTER_STATUS);
+}
+
+static void as_ping_impl(struct as_device_state *state, u32 handle)
+{
+ as_write_register(state->io_registers, AS_REGISTER_PING, handle);
+}
+
+static long
+as_ioctl_allocate_block_locked_impl(struct as_device_state *state,
+ u64 *size, u64 *offset)
+{
+ long res;
+
+ as_write_register(state->io_registers,
+ AS_REGISTER_BLOCK_SIZE_LOW,
+ lower_32_bits(*size));
+ as_write_register(state->io_registers,
+ AS_REGISTER_BLOCK_SIZE_HIGH,
+ upper_32_bits(*size));
+
+ res = as_run_command(state, AS_COMMAND_ALLOCATE_BLOCK);
+ if (!res) {
+ u64 low = as_read_register(state->io_registers,
+ AS_REGISTER_BLOCK_OFFSET_LOW);
+ u64 high = as_read_register(state->io_registers,
+ AS_REGISTER_BLOCK_OFFSET_HIGH);
+ *offset = low | (high << 32);
+
+ low = as_read_register(state->io_registers,
+ AS_REGISTER_BLOCK_SIZE_LOW);
+ high = as_read_register(state->io_registers,
+ AS_REGISTER_BLOCK_SIZE_HIGH);
+ *size = low | (high << 32);
+ }
+
+ return res;
+}
+
+static long
+as_ioctl_unallocate_block_locked_impl(struct as_device_state *state, u64 offset)
+{
+ as_write_register(state->io_registers,
+ AS_REGISTER_BLOCK_OFFSET_LOW,
+ lower_32_bits(offset));
+ as_write_register(state->io_registers,
+ AS_REGISTER_BLOCK_OFFSET_HIGH,
+ upper_32_bits(offset));
+
+ return as_run_command(state, AS_COMMAND_DEALLOCATE_BLOCK);
+}
+
+static int as_blocks_grow_capacity(int old_capacity)
+{
+ WARN_ON(old_capacity < 0);
+
+ return old_capacity + old_capacity;
+}
+
+static int
+as_blocks_insert(struct as_allocated_blocks *allocated_blocks,
+ u64 offset,
+ u64 size)
+{
+ int blocks_size;
+
+ if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
+ return -ERESTARTSYS;
+
+ blocks_size = allocated_blocks->blocks_size;
+
+ WARN_ON(allocated_blocks->blocks_capacity < 1);
+ WARN_ON(allocated_blocks->blocks_capacity <
+ allocated_blocks->blocks_size);
+ WARN_ON(!allocated_blocks->blocks);
+
+ if (allocated_blocks->blocks_capacity == blocks_size) {
+ int new_capacity =
+ as_blocks_grow_capacity(
+ allocated_blocks->blocks_capacity);
+ struct as_block *new_blocks =
+ kcalloc(new_capacity,
+ sizeof(allocated_blocks->blocks[0]),
+ GFP_KERNEL);
+
+ if (!new_blocks) {
+ mutex_unlock(&allocated_blocks->blocks_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(new_blocks, allocated_blocks->blocks,
+ blocks_size * sizeof(allocated_blocks->blocks[0]));
+
+ kfree(allocated_blocks->blocks);
+ allocated_blocks->blocks = new_blocks;
+ allocated_blocks->blocks_capacity = new_capacity;
+ }
+
+ WARN_ON(blocks_size >= allocated_blocks->blocks_capacity);
+
+ allocated_blocks->blocks[blocks_size] =
+ (struct as_block){ .offset = offset, .size = size };
+ allocated_blocks->blocks_size = blocks_size + 1;
+
+ mutex_unlock(&allocated_blocks->blocks_lock);
+ return 0;
+}
+
+static int
+as_blocks_remove(struct as_allocated_blocks *allocated_blocks, u64 offset)
+{
+ long res = -ENXIO;
+ struct as_block *blocks;
+ int blocks_size;
+ int i;
+
+ if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
+ return -ERESTARTSYS;
+
+ blocks = allocated_blocks->blocks;
+ WARN_ON(!blocks);
+
+ blocks_size = allocated_blocks->blocks_size;
+ WARN_ON(blocks_size < 0);
+
+ for (i = 0; i < blocks_size; ++i) {
+ if (offset == blocks[i].offset) {
+ int last = blocks_size - 1;
+
+ if (last > i)
+ blocks[i] = blocks[last];
+
+ --allocated_blocks->blocks_size;
+ res = 0;
+ break;
+ }
+ }
+
+ if (res)
+ pr_err("%s: Block not found atoffset: 0x%llx\n",
+ __func__, offset);
+
+ mutex_unlock(&allocated_blocks->blocks_lock);
+ return res;
+}
+
+static int
+as_blocks_check_if_mine(struct as_allocated_blocks *allocated_blocks,
+ u64 offset,
+ u64 size)
+{
+ const u64 end = offset + size;
+ int res = -EPERM;
+ struct as_block *block;
+ int blocks_size;
+
+ if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
+ return -ERESTARTSYS;
+
+ block = allocated_blocks->blocks;
+ WARN_ON(!block);
+
+ blocks_size = allocated_blocks->blocks_size;
+ WARN_ON(blocks_size < 0);
+
+ for (; blocks_size > 0; --blocks_size, ++block) {
+ u64 block_offset = block->offset;
+ u64 block_end = block_offset + block->size;
+
+ if (offset >= block_offset && end <= block_end) {
+ res = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&allocated_blocks->blocks_lock);
+ return res;
+}
+
+static int as_open(struct inode *inode, struct file *filp)
+{
+ struct as_file_state *file_state;
+ struct as_device_state *device_state;
+ struct goldfish_address_space_ping *ping_info;
+ u64 ping_info_phys;
+ u64 ping_info_phys_returned;
+ int err;
+
+ AS_DPRINT("Get free page");
+ ping_info =
+ (struct goldfish_address_space_ping *)
+ __get_free_page(GFP_KERNEL);
+ ping_info_phys = virt_to_phys(ping_info);
+ AS_DPRINT("Got free page: %p 0x%llx", ping_info,
+ (unsigned long long)ping_info_phys);
+
+ if (!ping_info) {
+ printk(KERN_ERR "Could not alloc goldfish_address_space command buffer!\n");
+ err = -ENOMEM;
+ goto err_ping_info_alloc_failed;
+ }
+
+ file_state = kzalloc(sizeof(*file_state), GFP_KERNEL);
+ if (!file_state) {
+ err = -ENOMEM;
+ goto err_file_state_alloc_failed;
+ }
+
+ file_state->device_state =
+ container_of(filp->private_data,
+ struct as_device_state,
+ miscdevice);
+ device_state = file_state->device_state;
+
+ file_state->allocated_blocks.blocks =
+ kcalloc(AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY,
+ sizeof(file_state->allocated_blocks.blocks[0]),
+ GFP_KERNEL);
+
+ if (!file_state->allocated_blocks.blocks) {
+ err = -ENOMEM;
+ goto err_file_state_blocks_alloc_failed;
+ }
+
+ file_state->shared_allocated_blocks.blocks =
+ kcalloc(
+ AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY,
+ sizeof(file_state->shared_allocated_blocks.blocks[0]),
+ GFP_KERNEL);
+
+ if (!file_state->shared_allocated_blocks.blocks) {
+ err = -ENOMEM;
+ goto err_file_state_blocks_alloc_failed;
+ }
+
+ file_state->allocated_blocks.blocks_size = 0;
+ file_state->allocated_blocks.blocks_capacity =
+ AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY;
+ mutex_init(&file_state->allocated_blocks.blocks_lock);
+
+ file_state->shared_allocated_blocks.blocks_size = 0;
+ file_state->shared_allocated_blocks.blocks_capacity =
+ AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY;
+ mutex_init(&file_state->shared_allocated_blocks.blocks_lock);
+
+ mutex_init(&file_state->ping_info_lock);
+ file_state->ping_info = ping_info;
+
+ AS_DPRINT("Acq regs lock");
+ mutex_lock(&device_state->registers_lock);
+ AS_DPRINT("Got regs lock, gen handle");
+ as_run_command(device_state, AS_COMMAND_GEN_HANDLE);
+ file_state->handle = as_read_register(
+ device_state->io_registers,
+ AS_REGISTER_HANDLE);
+ AS_DPRINT("Got regs lock, read handle: %u", file_state->handle);
+ mutex_unlock(&device_state->registers_lock);
+
+ if (file_state->handle == AS_INVALID_HANDLE) {
+ err = -EINVAL;
+ goto err_gen_handle_failed;
+ }
+
+ AS_DPRINT("Acq regs lock 2");
+ mutex_lock(&device_state->registers_lock);
+ AS_DPRINT("Acqd regs lock 2, write handle and ping info addr");
+ as_write_register(
+ device_state->io_registers,
+ AS_REGISTER_HANDLE,
+ file_state->handle);
+ as_write_register(
+ device_state->io_registers,
+ AS_REGISTER_PING_INFO_ADDR_LOW,
+ lower_32_bits(ping_info_phys));
+ as_write_register(
+ device_state->io_registers,
+ AS_REGISTER_PING_INFO_ADDR_HIGH,
+ upper_32_bits(ping_info_phys));
+ AS_DPRINT("Do tell ping info addr");
+ as_run_command(device_state, AS_COMMAND_TELL_PING_INFO_ADDR);
+ ping_info_phys_returned =
+ ((u64)as_read_register(device_state->io_registers,
+ AS_REGISTER_PING_INFO_ADDR_LOW)) |
+ ((u64)as_read_register(device_state->io_registers,
+ AS_REGISTER_PING_INFO_ADDR_HIGH) << 32);
+ AS_DPRINT("Read back");
+
+ if (ping_info_phys != ping_info_phys_returned) {
+ printk(KERN_ERR "%s: Invalid result for ping info phys addr: expected 0x%llx, got 0x%llx\n",
+ __func__,
+ ping_info_phys, ping_info_phys_returned);
+ err = -EINVAL;
+ goto err_ping_info_failed;
+ }
+
+ mutex_unlock(&device_state->registers_lock);
+
+ filp->private_data = file_state;
+ return 0;
+
+err_ping_info_failed:
+err_gen_handle_failed:
+ kfree(file_state->allocated_blocks.blocks);
+ kfree(file_state->shared_allocated_blocks.blocks);
+err_file_state_blocks_alloc_failed:
+ kfree(file_state);
+err_file_state_alloc_failed:
+ free_page((unsigned long)ping_info);
+err_ping_info_alloc_failed:
+ return err;
+}
+
+static int as_release(struct inode *inode, struct file *filp)
+{
+ struct as_file_state *file_state = filp->private_data;
+ struct as_allocated_blocks *allocated_blocks =
+ &file_state->allocated_blocks;
+ struct as_allocated_blocks *shared_allocated_blocks =
+ &file_state->shared_allocated_blocks;
+ struct goldfish_address_space_ping *ping_info = file_state->ping_info;
+ struct as_device_state *state = file_state->device_state;
+ int blocks_size, shared_blocks_size;
+ int i;
+
+ WARN_ON(!state);
+ WARN_ON(!allocated_blocks);
+ WARN_ON(!allocated_blocks->blocks);
+ WARN_ON(allocated_blocks->blocks_size < 0);
+ WARN_ON(!shared_allocated_blocks);
+ WARN_ON(!shared_allocated_blocks->blocks);
+ WARN_ON(shared_allocated_blocks->blocks_size < 0);
+ WARN_ON(!ping_info);
+
+ blocks_size = allocated_blocks->blocks_size;
+ shared_blocks_size = shared_allocated_blocks->blocks_size;
+
+ mutex_lock(&state->registers_lock);
+
+ as_write_register(state->io_registers, AS_REGISTER_HANDLE,
+ file_state->handle);
+ as_run_command(state, AS_COMMAND_DESTROY_HANDLE);
+
+ for (i = 0; i < blocks_size; ++i) {
+ WARN_ON(as_ioctl_unallocate_block_locked_impl(
+ state, allocated_blocks->blocks[i].offset));
+ }
+
+ // Do not deallocate shared blocks as they are host-owned
+
+ mutex_unlock(&state->registers_lock);
+
+ kfree(allocated_blocks->blocks);
+ kfree(shared_allocated_blocks->blocks);
+ free_page((unsigned long)ping_info);
+ kfree(file_state);
+ return 0;
+}
+
+static int as_mmap_impl(struct as_device_state *state,
+ size_t size,
+ struct vm_area_struct *vma)
+{
+ unsigned long pfn = (state->address_area_phys_address >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ pfn,
+ size,
+ vma->vm_page_prot);
+}
+
+static int as_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct as_file_state *file_state = filp->private_data;
+ struct as_allocated_blocks *allocated_blocks =
+ &file_state->allocated_blocks;
+ struct as_allocated_blocks *shared_allocated_blocks =
+ &file_state->shared_allocated_blocks;
+ size_t size = PAGE_ALIGN(vma->vm_end - vma->vm_start);
+ int res_check_nonshared, res_check_shared;
+
+ WARN_ON(!allocated_blocks);
+
+ res_check_nonshared =
+ as_blocks_check_if_mine(allocated_blocks,
+ vma->vm_pgoff << PAGE_SHIFT,
+ size);
+
+ res_check_shared =
+ as_blocks_check_if_mine(shared_allocated_blocks,
+ vma->vm_pgoff << PAGE_SHIFT,
+ size);
+
+ if (res_check_nonshared && res_check_shared)
+ return res_check_nonshared;
+ else
+ return as_mmap_impl(file_state->device_state, size, vma);
+}
+
+static long as_ioctl_allocate_block_impl(
+ struct as_device_state *state,
+ struct goldfish_address_space_allocate_block *request)
+{
+ long res;
+
+ if (mutex_lock_interruptible(&state->registers_lock))
+ return -ERESTARTSYS;
+
+ res = as_ioctl_allocate_block_locked_impl(state,
+ &request->size,
+ &request->offset);
+ if (!res) {
+ request->phys_addr =
+ state->address_area_phys_address + request->offset;
+ }
+
+ mutex_unlock(&state->registers_lock);
+ return res;
+}
+
+static void
+as_ioctl_unallocate_block_impl(struct as_device_state *state, u64 offset)
+{
+ mutex_lock(&state->registers_lock);
+ WARN_ON(as_ioctl_unallocate_block_locked_impl(state, offset));
+ mutex_unlock(&state->registers_lock);
+}
+
+static long
+as_ioctl_allocate_block(struct as_allocated_blocks *allocated_blocks,
+ struct as_device_state *state,
+ void __user *ptr)
+{
+ long res;
+ struct goldfish_address_space_allocate_block request;
+
+ if (copy_from_user(&request, ptr, sizeof(request)))
+ return -EFAULT;
+
+ res = as_ioctl_allocate_block_impl(state, &request);
+ if (!res) {
+ res = as_blocks_insert(allocated_blocks,
+ request.offset,
+ request.size);
+
+ if (res) {
+ as_ioctl_unallocate_block_impl(state, request.offset);
+ } else if (copy_to_user(ptr, &request, sizeof(request))) {
+ as_ioctl_unallocate_block_impl(state, request.offset);
+ res = -EFAULT;
+ }
+ }
+
+ return res;
+}
+
+static long
+as_ioctl_unallocate_block(struct as_allocated_blocks *allocated_blocks,
+ struct as_device_state *state,
+ void __user *ptr)
+{
+ long res;
+ u64 offset;
+
+ if (copy_from_user(&offset, ptr, sizeof(offset)))
+ return -EFAULT;
+
+ res = as_blocks_remove(allocated_blocks, offset);
+ if (!res)
+ as_ioctl_unallocate_block_impl(state, offset);
+
+ return res;
+}
+
+static long
+as_ioctl_claim_block(struct as_allocated_blocks *allocated_blocks,
+ struct as_device_state *state,
+ void __user *ptr)
+{
+ long res;
+ struct goldfish_address_space_claim_shared request;
+
+ if (copy_from_user(&request, ptr, sizeof(request)))
+ return -EFAULT;
+
+ res = as_blocks_insert(allocated_blocks,
+ request.offset,
+ request.size);
+
+ if (res)
+ return res;
+ else if (copy_to_user(ptr, &request, sizeof(request)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+as_ioctl_unclaim_block(struct as_allocated_blocks *allocated_blocks,
+ struct as_device_state *state,
+ void __user *ptr)
+{
+ long res;
+ u64 offset;
+
+ if (copy_from_user(&offset, ptr, sizeof(offset)))
+ return -EFAULT;
+
+ res = as_blocks_remove(allocated_blocks, offset);
+ if (res)
+ pr_err("%s: as_blocks_remove failed (%ld)\n", __func__, res);
+
+ return res;
+}
+
+static long
+as_ioctl_ping_impl(struct goldfish_address_space_ping *ping_info,
+ struct as_device_state *state,
+ u32 handle,
+ void __user *ptr)
+{
+ struct goldfish_address_space_ping user_copy;
+
+ if (copy_from_user(&user_copy, ptr, sizeof(user_copy)))
+ return -EFAULT;
+
+ *ping_info = user_copy;
+
+ // Convert to phys addrs
+ ping_info->offset += state->address_area_phys_address;
+
+ mutex_lock(&state->registers_lock);
+ as_ping_impl(state, handle);
+ mutex_unlock(&state->registers_lock);
+
+ memcpy(&user_copy, ping_info, sizeof(user_copy));
+ if (copy_to_user(ptr, &user_copy, sizeof(user_copy)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long as_ioctl_ping(struct as_file_state *file_state, void __user *ptr)
+{
+ long ret;
+
+ mutex_lock(&file_state->ping_info_lock);
+ ret = as_ioctl_ping_impl(file_state->ping_info,
+ file_state->device_state,
+ file_state->handle,
+ ptr);
+ mutex_unlock(&file_state->ping_info_lock);
+
+ return ret;
+}
+
+static long as_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct as_file_state *file_state = filp->private_data;
+ long res = -ENOTTY;
+
+ switch (cmd) {
+ case GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK:
+ res = as_ioctl_allocate_block(&file_state->allocated_blocks,
+ file_state->device_state,
+ (void __user *)arg);
+ break;
+
+ case GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK:
+ res = as_ioctl_unallocate_block(&file_state->allocated_blocks,
+ file_state->device_state,
+ (void __user *)arg);
+ break;
+
+ case GOLDFISH_ADDRESS_SPACE_IOCTL_PING:
+ res = as_ioctl_ping(file_state, (void __user *)arg);
+ break;
+
+ case GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED:
+ res = as_ioctl_claim_block(
+ &file_state->shared_allocated_blocks,
+ file_state->device_state,
+ (void __user *)arg);
+ break;
+
+ case GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED:
+ res = as_ioctl_unclaim_block(
+ &file_state->shared_allocated_blocks,
+ file_state->device_state,
+ (void __user *)arg);
+ break;
+
+ default:
+ res = -ENOTTY;
+ }
+
+ return res;
+}
+
+static const struct file_operations userspace_file_operations = {
+ .owner = THIS_MODULE,
+ .open = as_open,
+ .release = as_release,
+ .mmap = as_mmap,
+ .unlocked_ioctl = as_ioctl,
+ .compat_ioctl = as_ioctl,
+};
+
+static void __iomem __must_check *ioremap_pci_bar(struct pci_dev *dev,
+ int bar_id)
+{
+ void __iomem *io;
+ unsigned long size = pci_resource_len(dev, bar_id);
+
+ if (!size)
+ return IOMEM_ERR_PTR(-ENXIO);
+
+ io = ioremap(pci_resource_start(dev, bar_id), size);
+ if (!io)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ return io;
+}
+
+static void __must_check *memremap_pci_bar(struct pci_dev *dev,
+ int bar_id,
+ unsigned long flags)
+{
+ void *mem;
+ unsigned long size = pci_resource_len(dev, bar_id);
+
+ if (!size)
+ return ERR_PTR(-ENXIO);
+
+ mem = memremap(pci_resource_start(dev, bar_id), size, flags);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ return mem;
+}
+
+
+static void fill_miscdevice(struct miscdevice *miscdev)
+{
+ memset(miscdev, 0, sizeof(*miscdev));
+
+ miscdev->minor = MISC_DYNAMIC_MINOR;
+ miscdev->name = GOLDFISH_ADDRESS_SPACE_DEVICE_NAME;
+ miscdev->fops = &userspace_file_operations;
+}
+
+static int __must_check
+create_as_device(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int res;
+ struct as_device_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ res = pci_request_region(dev,
+ AS_PCI_CONTROL_BAR_ID,
+ "Address space control");
+ if (res) {
+ pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
+ dev->bus->number,
+ dev->devfn,
+ AS_PCI_CONTROL_BAR_ID);
+ goto out_free_device_state;
+ }
+
+ res = pci_request_region(dev,
+ AS_PCI_AREA_BAR_ID,
+ "Address space area");
+ if (res) {
+ pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
+ dev->bus->number,
+ dev->devfn,
+ AS_PCI_AREA_BAR_ID);
+ goto out_release_control_bar;
+ }
+
+ fill_miscdevice(&state->miscdevice);
+ res = misc_register(&state->miscdevice);
+ if (res)
+ goto out_release_area_bar;
+
+ state->io_registers = ioremap_pci_bar(dev,
+ AS_PCI_CONTROL_BAR_ID);
+ if (IS_ERR(state->io_registers)) {
+ res = PTR_ERR(state->io_registers);
+ goto out_misc_deregister;
+ }
+
+ state->address_area = memremap_pci_bar(dev,
+ AS_PCI_AREA_BAR_ID,
+ MEMREMAP_WB);
+ if (IS_ERR(state->address_area)) {
+ res = PTR_ERR(state->address_area);
+ goto out_iounmap;
+ }
+
+ state->address_area_phys_address =
+ pci_resource_start(dev, AS_PCI_AREA_BAR_ID);
+
+ as_write_register(state->io_registers,
+ AS_REGISTER_GUEST_PAGE_SIZE,
+ PAGE_SIZE);
+ as_write_register(state->io_registers,
+ AS_REGISTER_PHYS_START_LOW,
+ lower_32_bits(state->address_area_phys_address));
+ as_write_register(state->io_registers,
+ AS_REGISTER_PHYS_START_HIGH,
+ upper_32_bits(state->address_area_phys_address));
+
+ state->dev = dev;
+ mutex_init(&state->registers_lock);
+
+ pci_set_drvdata(dev, state);
+ return 0;
+
+out_iounmap:
+ iounmap(state->io_registers);
+out_misc_deregister:
+ misc_deregister(&state->miscdevice);
+out_release_area_bar:
+ pci_release_region(dev, AS_PCI_AREA_BAR_ID);
+out_release_control_bar:
+ pci_release_region(dev, AS_PCI_CONTROL_BAR_ID);
+out_free_device_state:
+ kzfree(state);
+
+ return res;
+}
+
+static void as_pci_destroy_device(struct as_device_state *state)
+{
+ memunmap(state->address_area);
+ iounmap(state->io_registers);
+ misc_deregister(&state->miscdevice);
+ pci_release_region(state->dev, AS_PCI_AREA_BAR_ID);
+ pci_release_region(state->dev, AS_PCI_CONTROL_BAR_ID);
+ kfree(state);
+}
+
+static int __must_check
+as_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int res;
+ u8 hardware_revision;
+
+ res = pci_enable_device(dev);
+ if (res)
+ return res;
+
+ res = pci_read_config_byte(dev, PCI_REVISION_ID, &hardware_revision);
+ if (res)
+ goto out_disable_pci;
+
+ switch (hardware_revision) {
+ case 1:
+ res = create_as_device(dev, id);
+ break;
+
+ default:
+ res = -ENODEV;
+ goto out_disable_pci;
+ }
+
+ return 0;
+
+out_disable_pci:
+ pci_disable_device(dev);
+
+ return res;
+}
+
+static void as_pci_remove(struct pci_dev *dev)
+{
+ struct as_device_state *state = pci_get_drvdata(dev);
+
+ as_pci_destroy_device(state);
+ pci_disable_device(dev);
+}
+
+static const struct pci_device_id as_pci_tbl[] = {
+ { PCI_DEVICE(AS_PCI_VENDOR_ID, AS_PCI_DEVICE_ID), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, as_pci_tbl);
+
+static struct pci_driver goldfish_address_space_driver = {
+ .name = GOLDFISH_ADDRESS_SPACE_DEVICE_NAME,
+ .id_table = as_pci_tbl,
+ .probe = as_pci_probe,
+ .remove = as_pci_remove,
+};
+
+module_pci_driver(goldfish_address_space_driver);
diff --git a/goldfish_battery.c b/goldfish_battery.c
new file mode 100644
index 0000000..08f789c
--- /dev/null
+++ b/goldfish_battery.c
@@ -0,0 +1,300 @@
+/*
+ * Power supply driver for the goldfish emulator
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+#define POWER_SUPPLY_CURRENT_UA (900000) /* in uAmp */
+#define POWER_SUPPLY_CHARGE_FULL_UAH (300000) /* in uAmp*H */
+
+struct goldfish_battery_data {
+ void __iomem *reg_base;
+ int irq;
+ spinlock_t lock;
+
+ struct power_supply *battery;
+ struct power_supply *ac;
+};
+
+#define GOLDFISH_BATTERY_READ(data, addr) \
+ (readl(data->reg_base + addr))
+#define GOLDFISH_BATTERY_WRITE(data, addr, x) \
+ (writel(x, data->reg_base + addr))
+
+/*
+ * Temporary variable used between goldfish_battery_probe() and
+ * goldfish_battery_open().
+ */
+static struct goldfish_battery_data *battery_data;
+
+enum {
+ /* status register */
+ BATTERY_INT_STATUS = 0x00,
+ /* set this to enable IRQ */
+ BATTERY_INT_ENABLE = 0x04,
+ BATTERY_AC_ONLINE = 0x08,
+ BATTERY_STATUS = 0x0C,
+ BATTERY_HEALTH = 0x10,
+ BATTERY_PRESENT = 0x14,
+ BATTERY_CAPACITY = 0x18,
+ BATTERY_VOLTAGE = 0x1C,
+ BATTERY_TEMP = 0x20,
+ BATTERY_CHARGE_COUNTER = 0x24,
+ BATTERY_VOLTAGE_MAX = 0x28,
+ BATTERY_CURRENT_MAX = 0x2c,
+ BATTERY_STATUS_CHANGED = 1U << 0,
+ AC_STATUS_CHANGED = 1U << 1,
+ BATTERY_INT_MASK = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED,
+};
+
+
+static int goldfish_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct goldfish_battery_data *data = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_AC_ONLINE);
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_VOLTAGE_MAX);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CURRENT_MAX);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int goldfish_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct goldfish_battery_data *data = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_STATUS);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_HEALTH);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_PRESENT);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CAPACITY);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = GOLDFISH_BATTERY_READ(data,
+ BATTERY_CHARGE_COUNTER);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = POWER_SUPPLY_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = POWER_SUPPLY_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = POWER_SUPPLY_CHARGE_FULL_UAH;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = 10;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property goldfish_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+};
+
+static enum power_supply_property goldfish_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static irqreturn_t goldfish_battery_interrupt(int irq, void *dev_id)
+{
+ unsigned long irq_flags;
+ struct goldfish_battery_data *data = dev_id;
+ uint32_t status;
+
+ spin_lock_irqsave(&data->lock, irq_flags);
+
+ /* read status flags, which will clear the interrupt */
+ status = GOLDFISH_BATTERY_READ(data, BATTERY_INT_STATUS);
+ status &= BATTERY_INT_MASK;
+
+ if (status & BATTERY_STATUS_CHANGED)
+ power_supply_changed(data->battery);
+ if (status & AC_STATUS_CHANGED)
+ power_supply_changed(data->ac);
+
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static const struct power_supply_desc battery_desc = {
+ .properties = goldfish_battery_props,
+ .num_properties = ARRAY_SIZE(goldfish_battery_props),
+ .get_property = goldfish_battery_get_property,
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+};
+
+static const struct power_supply_desc ac_desc = {
+ .properties = goldfish_ac_props,
+ .num_properties = ARRAY_SIZE(goldfish_ac_props),
+ .get_property = goldfish_ac_get_property,
+ .name = "ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+};
+
+static int goldfish_battery_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_battery_data *data;
+ struct power_supply_config psy_cfg = {};
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "platform_get_resource failed\n");
+ return -ENODEV;
+ }
+
+ data->reg_base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (data->reg_base == NULL) {
+ dev_err(&pdev->dev, "unable to remap MMIO\n");
+ return -ENOMEM;
+ }
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, data->irq, goldfish_battery_interrupt,
+ IRQF_SHARED, pdev->name, data);
+ if (ret)
+ return ret;
+
+ psy_cfg.drv_data = data;
+
+ data->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
+ if (IS_ERR(data->ac))
+ return PTR_ERR(data->ac);
+
+ data->battery = power_supply_register(&pdev->dev, &battery_desc,
+ &psy_cfg);
+ if (IS_ERR(data->battery)) {
+ power_supply_unregister(data->ac);
+ return PTR_ERR(data->battery);
+ }
+
+ platform_set_drvdata(pdev, data);
+ battery_data = data;
+
+ GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
+ return 0;
+}
+
+static int goldfish_battery_remove(struct platform_device *pdev)
+{
+ struct goldfish_battery_data *data = platform_get_drvdata(pdev);
+
+ power_supply_unregister(data->battery);
+ power_supply_unregister(data->ac);
+ battery_data = NULL;
+ return 0;
+}
+
+static const struct of_device_id goldfish_battery_of_match[] = {
+ { .compatible = "google,goldfish-battery", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_battery_of_match);
+
+static const struct acpi_device_id goldfish_battery_acpi_match[] = {
+ { "GFSH0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
+
+static struct platform_driver goldfish_battery_device = {
+ .probe = goldfish_battery_probe,
+ .remove = goldfish_battery_remove,
+ .driver = {
+ .name = "goldfish-battery",
+ .of_match_table = goldfish_battery_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_battery_acpi_match),
+ }
+};
+module_platform_driver(goldfish_battery_device);
+
+MODULE_AUTHOR("Mike Lockwood lockwood@android.com");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Battery driver for the Goldfish emulator");
diff --git a/goldfish_cpufreq_stats.c b/goldfish_cpufreq_stats.c
new file mode 100644
index 0000000..383f3e7
--- /dev/null
+++ b/goldfish_cpufreq_stats.c
@@ -0,0 +1,83 @@
+/*
+ * drivers/cpufreq/cpufreq_stats.c
+ *
+ * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define to_attr(a) container_of(a, struct freq_attr, attr)
+
+static unsigned long long fake_time;
+
+static DEFINE_PER_CPU(struct kobject *, cpufreq_kobj);
+
+static ssize_t time_in_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t len = 0;
+
+ len = sprintf(buf, "%lu %llu\n", 3000000000UL, ++fake_time);
+ return len;
+}
+
+static struct kobj_attribute time_in_state =
+ __ATTR_RO_MODE(time_in_state, 0444);
+
+static struct attribute *cpufreq_stats_attrs[] = {
+ &time_in_state.attr,
+ NULL
+};
+static struct attribute_group stats_attr_group = {
+ .attrs = cpufreq_stats_attrs,
+ .name = "stats"
+};
+
+static void cpufreq_stats_free(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+ struct kobject **kobj = &per_cpu(cpufreq_kobj, cpu);
+
+ sysfs_remove_group(&dev->kobj, &stats_attr_group);
+ kobject_put(*kobj);
+ *kobj = NULL;
+}
+
+static void cpufreq_stats_create(unsigned int cpu)
+{
+ int ret;
+ struct device *dev = get_cpu_device(cpu);
+ struct kobject **kobj = &per_cpu(cpufreq_kobj, cpu);
+
+ *kobj = kobject_create_and_add("cpufreq", &dev->kobj);
+ ret = sysfs_create_group(*kobj, &stats_attr_group);
+}
+
+static int __init goldfish_cpufreq_stats_init(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ cpufreq_stats_create(cpu);
+
+ return 0;
+}
+
+static void __exit goldfish_cpufreq_stats_exit(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ cpufreq_stats_free(cpu);
+}
+
+module_init(goldfish_cpufreq_stats_init);
+module_exit(goldfish_cpufreq_stats_exit);
diff --git a/goldfish_pipe.h b/goldfish_pipe.h
new file mode 100644
index 0000000..385e306
--- /dev/null
+++ b/goldfish_pipe.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef GOLDFISH_PIPE_H
+#define GOLDFISH_PIPE_H
+
+#define DEVICE_NAME "goldfish_pipe"
+
+struct goldfish_pipe_dev_base {
+ /* the destructor, the pointer is set in init */
+ int (*deinit)(void *pipe_dev, struct platform_device *pdev);
+};
+
+/* The entry point to the pipe v1 driver */
+int goldfish_pipe_device_v1_init(struct platform_device *pdev,
+ void __iomem *base,
+ int irq);
+
+/* The entry point to the pipe v2 driver */
+int goldfish_pipe_device_v2_init(struct platform_device *pdev,
+ char __iomem *base,
+ int irq);
+
+#endif /* GOLDFISH_PIPE_H */
diff --git a/goldfish_pipe_base.c b/goldfish_pipe_base.c
new file mode 100644
index 0000000..33b7bcf
--- /dev/null
+++ b/goldfish_pipe_base.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
+ * Copyright (C) 2011-2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This source file contains the implementation of a special device driver
+ * that intends to provide a *very* fast communication channel between the
+ * guest system and the QEMU emulator.
+ *
+ * Usage from the guest is simply the following (error handling simplified):
+ *
+ * int fd = open("/dev/qemu_pipe",O_RDWR);
+ * .... write() or read() through the pipe.
+ *
+ * This driver doesn't deal with the exact protocol used during the session.
+ * It is intended to be as simple as something like:
+ *
+ * // do this _just_ after opening the fd to connect to a specific
+ * // emulator service.
+ * const char* msg = "<pipename>";
+ * if (write(fd, msg, strlen(msg)+1) < 0) {
+ * ... could not connect to <pipename> service
+ * close(fd);
+ * }
+ *
+ * // after this, simply read() and write() to communicate with the
+ * // service. Exact protocol details left as an exercise to the reader.
+ *
+ * This driver is very fast because it doesn't copy any data through
+ * intermediate buffers, since the emulator is capable of translating
+ * guest user addresses into host ones.
+ *
+ * Note that we must however ensure that each user page involved in the
+ * exchange is properly mapped during a transfer.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include "goldfish_pipe_qemu.h"
+#include "goldfish_pipe.h"
+
+/*
+ * Update this when something changes in the driver's behavior so the host
+ * can benefit from knowing it
+ * Notes:
+ * version 2 was an intermediate release and isn't supported anymore.
+ * version 3 is goldfish_pipe_v2 without DMA support.
+ * version 4 (current) is goldfish_pipe_v2 with DMA support.
+ */
+enum {
+ PIPE_DRIVER_VERSION = 4,
+ PIPE_CURRENT_DEVICE_VERSION = 2
+};
+
+static int goldfish_pipe_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ char __iomem *base;
+ int irq;
+ int version;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r || resource_size(r) < PAGE_SIZE) {
+ dev_err(&pdev->dev, "can't allocate i/o page\n");
+ return -EINVAL;
+ }
+ base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -EINVAL;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r)
+ return -EINVAL;
+
+ irq = r->start;
+
+ /*
+ * Exchange the versions with the host device
+ *
+ * Note: v1 driver used to not report its version, so we write it before
+ * reading device version back: this allows the host implementation to
+ * detect the old driver (if there was no version write before read).
+ */
+ writel(PIPE_DRIVER_VERSION, base + PIPE_V2_REG_VERSION);
+ version = readl(base + PIPE_V2_REG_VERSION);
+
+ if (version < PIPE_CURRENT_DEVICE_VERSION)
+ return goldfish_pipe_device_v1_init(pdev, base, irq);
+ else
+ return goldfish_pipe_device_v2_init(pdev, base, irq);
+}
+
+static int goldfish_pipe_remove(struct platform_device *pdev)
+{
+ struct goldfish_pipe_dev_base *dev = platform_get_drvdata(pdev);
+
+ return dev->deinit(dev, pdev);
+}
+
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+ { "GFSH0003", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+ { .compatible = "google,android-pipe", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
+static struct platform_driver goldfish_pipe_driver = {
+ .probe = goldfish_pipe_probe,
+ .remove = goldfish_pipe_remove,
+ .driver = {
+ .name = "goldfish_pipe",
+ .of_match_table = goldfish_pipe_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
+ }
+};
+
+module_platform_driver(goldfish_pipe_driver);
+MODULE_AUTHOR("David Turner <digit@google.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/goldfish_pipe_qemu.h b/goldfish_pipe_qemu.h
new file mode 100644
index 0000000..134c32c
--- /dev/null
+++ b/goldfish_pipe_qemu.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * IMPORTANT: The following constants must match the ones used and defined in
+ * external/qemu/include/hw/misc/goldfish_pipe.h
+ */
+
+#ifndef GOLDFISH_PIPE_QEMU_H
+#define GOLDFISH_PIPE_QEMU_H
+
+/* List of bitflags returned in status of CMD_POLL command */
+enum PipePollFlags {
+ PIPE_POLL_IN = 1 << 0,
+ PIPE_POLL_OUT = 1 << 1,
+ PIPE_POLL_HUP = 1 << 2
+};
+
+/* Possible status values used to signal errors */
+enum PipeErrors {
+ PIPE_ERROR_INVAL = -1,
+ PIPE_ERROR_AGAIN = -2,
+ PIPE_ERROR_NOMEM = -3,
+ PIPE_ERROR_IO = -4
+};
+
+/* Bit-flags used to signal events from the emulator */
+enum PipeWakeFlags {
+ /* emulator closed pipe */
+ PIPE_WAKE_CLOSED = 1 << 0,
+
+ /* pipe can now be read from */
+ PIPE_WAKE_READ = 1 << 1,
+
+ /* pipe can now be written to */
+ PIPE_WAKE_WRITE = 1 << 2,
+
+ /* unlock this pipe's DMA buffer */
+ PIPE_WAKE_UNLOCK_DMA = 1 << 3,
+
+ /* unlock DMA buffer of the pipe shared to this pipe */
+ PIPE_WAKE_UNLOCK_DMA_SHARED = 1 << 4,
+};
+
+/* Possible pipe closing reasons */
+enum PipeCloseReason {
+ /* guest sent a close command */
+ PIPE_CLOSE_GRACEFUL = 0,
+
+ /* guest rebooted, we're closing the pipes */
+ PIPE_CLOSE_REBOOT = 1,
+
+ /* close old pipes on snapshot load */
+ PIPE_CLOSE_LOAD_SNAPSHOT = 2,
+
+ /* some unrecoverable error on the pipe */
+ PIPE_CLOSE_ERROR = 3,
+};
+
+/* Bit flags for the 'flags' field */
+enum PipeFlagsBits {
+ BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
+ BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
+ BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+};
+
+enum PipeV1Regs {
+ /* write: value = command */
+ PIPE_V1_REG_COMMAND = 0x00,
+ /* read */
+ PIPE_V1_REG_STATUS = 0x04,
+ /* read/write: channel id */
+ PIPE_V1_REG_CHANNEL = 0x08,
+ /* read/write: channel id */
+ PIPE_V1_REG_CHANNEL_HIGH = 0x30,
+ /* read/write: buffer size */
+ PIPE_V1_REG_SIZE = 0x0C,
+ /* write: physical address */
+ PIPE_V1_REG_ADDRESS = 0x10,
+ /* write: physical address */
+ PIPE_V1_REG_ADDRESS_HIGH = 0x34,
+ /* read: wake flags */
+ PIPE_V1_REG_WAKES = 0x14,
+ /* read/write: batch data address */
+ PIPE_V1_REG_PARAMS_ADDR_LOW = 0x18,
+ /* read/write: batch data address */
+ PIPE_V1_REG_PARAMS_ADDR_HIGH = 0x1C,
+ /* write: batch access */
+ PIPE_V1_REG_ACCESS_PARAMS = 0x20,
+ /* read: device version */
+ PIPE_V1_REG_VERSION = 0x24,
+};
+
+enum PipeV2Regs {
+ PIPE_V2_REG_CMD = 0,
+
+ PIPE_V2_REG_SIGNAL_BUFFER_HIGH = 4,
+ PIPE_V2_REG_SIGNAL_BUFFER = 8,
+ PIPE_V2_REG_SIGNAL_BUFFER_COUNT = 12,
+
+ PIPE_V2_REG_OPEN_BUFFER_HIGH = 20,
+ PIPE_V2_REG_OPEN_BUFFER = 24,
+
+ PIPE_V2_REG_VERSION = 36,
+
+ PIPE_V2_REG_GET_SIGNALLED = 48,
+};
+
+enum PipeCmdCode {
+ /* to be used by the pipe device itself */
+ PIPE_CMD_OPEN = 1,
+
+ PIPE_CMD_CLOSE,
+ PIPE_CMD_POLL,
+ PIPE_CMD_WRITE,
+ PIPE_CMD_WAKE_ON_WRITE,
+ PIPE_CMD_READ,
+ PIPE_CMD_WAKE_ON_READ,
+
+ /*
+ * TODO(zyy): implement a deferred read/write execution to allow
+ * parallel processing of pipe operations on the host.
+ */
+ PIPE_CMD_WAKE_ON_DONE_IO,
+ PIPE_CMD_DMA_HOST_MAP,
+ PIPE_CMD_DMA_HOST_UNMAP,
+};
+
+#endif /* GOLDFISH_PIPE_QEMU_H */
diff --git a/goldfish_pipe_v1.c b/goldfish_pipe_v1.c
new file mode 100644
index 0000000..759b0b8
--- /dev/null
+++ b/goldfish_pipe_v1.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This source file contains the implementation of the legacy version of
+ * a goldfish pipe device driver. See goldfish_pipe_v2.c for the current
+ * version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/bug.h>
+#include <linux/goldfish.h>
+
+#include "goldfish_pipe_qemu.h"
+#include "goldfish_pipe.h"
+
+#define MAX_PAGES_TO_GRAB 32
+
+/* A value that will not be set by qemu emulator */
+#define INITIAL_BATCH_RESULT (0xdeadbeaf)
+
+struct goldfish_pipe_dev;
+
+/* This data type models a given pipe instance */
+struct goldfish_pipe {
+ struct goldfish_pipe_dev *dev;
+
+ /* The wake flags pipe is waiting for
+ * Note: not protected with any lock, uses atomic operations
+ * and barriers to make it thread-safe.
+ */
+ unsigned long flags;
+
+ wait_queue_head_t wake_queue;
+
+ /* protects access to the pipe */
+ struct mutex lock;
+};
+
+struct access_params {
+ unsigned long channel;
+ u32 size;
+ unsigned long address;
+ u32 cmd;
+ u32 result;
+ /* reserved for future extension */
+ u32 flags;
+};
+
+/* The driver state. Holds a reference to the i/o page used to
+ * communicate with the emulator, and a wake queue for blocked tasks
+ * waiting to be awoken.
+ */
+struct goldfish_pipe_dev {
+ /* Needed for the 'remove' call */
+ struct goldfish_pipe_dev_base super;
+
+ /* ptr to platform device's device struct */
+ struct device *pdev_dev;
+
+ /* the base address for MMIO */
+ char __iomem *base;
+
+ struct access_params *aps;
+
+ struct miscdevice miscdev;
+
+ /* Global device spinlock */
+ spinlock_t lock;
+};
+
+static int goldfish_pipe_device_deinit(void *raw_dev,
+ struct platform_device *pdev);
+
+static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
+{
+ unsigned long flags;
+ u32 status;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ gf_write_ptr(pipe, dev->base + PIPE_V1_REG_CHANNEL,
+ dev->base + PIPE_V1_REG_CHANNEL_HIGH);
+ writel(cmd, dev->base + PIPE_V1_REG_COMMAND);
+ status = readl(dev->base + PIPE_V1_REG_STATUS);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return status;
+}
+
+static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
+{
+ unsigned long flags;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ gf_write_ptr(pipe, dev->base + PIPE_V1_REG_CHANNEL,
+ dev->base + PIPE_V1_REG_CHANNEL_HIGH);
+ writel(cmd, dev->base + PIPE_V1_REG_COMMAND);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* This function converts an error code returned by the emulator through
+ * the PIPE_V1_REG_STATUS i/o register into a valid negative errno value.
+ */
+static int goldfish_pipe_error_convert(int status)
+{
+ switch (status) {
+ case PIPE_ERROR_AGAIN:
+ return -EAGAIN;
+ case PIPE_ERROR_NOMEM:
+ return -ENOMEM;
+ case PIPE_ERROR_IO:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Notice: QEMU returns 0 for reads of unknown registers, so reading the
+ * params address back tells us whether access_params is supported.
+ */
+static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
+ struct access_params *aps)
+{
+ u32 aph, apl;
+ u64 paddr;
+
+ aph = readl(dev->base + PIPE_V1_REG_PARAMS_ADDR_HIGH);
+ apl = readl(dev->base + PIPE_V1_REG_PARAMS_ADDR_LOW);
+
+ paddr = ((u64)aph << 32) | apl;
+ return paddr == (__pa(aps));
+}
+
+static int setup_access_params_addr(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
+{
+ u64 paddr;
+ struct access_params *aps;
+
+ aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params),
+ GFP_KERNEL);
+ if (!aps)
+ return -ENOMEM;
+
+ paddr = __pa(aps);
+ writel((u32)(paddr >> 32), dev->base + PIPE_V1_REG_PARAMS_ADDR_HIGH);
+ writel((u32)paddr, dev->base + PIPE_V1_REG_PARAMS_ADDR_LOW);
+
+ if (valid_batchbuffer_addr(dev, aps)) {
+ dev->aps = aps;
+ return 0;
+ }
+
+ devm_kfree(&pdev->dev, aps);
+ return -EFAULT;
+}
+
+static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
+ unsigned long address, unsigned long avail,
+ struct goldfish_pipe *pipe, int *status)
+{
+ struct access_params *aps = dev->aps;
+
+ if (!aps)
+ return -EINVAL;
+
+ aps->result = INITIAL_BATCH_RESULT;
+ aps->channel = (unsigned long)pipe;
+ aps->size = avail;
+ aps->address = address;
+ aps->cmd = cmd;
+ writel(cmd, dev->base + PIPE_V1_REG_ACCESS_PARAMS);
+
+ /*
+ * If the aps->result has not changed, that means
+ * that the batch command failed
+ */
+ if (aps->result == INITIAL_BATCH_RESULT)
+ return -EINVAL;
+
+ *status = aps->result;
+ return 0;
+}
+
+static int transfer_pages(struct goldfish_pipe_dev *dev,
+ struct goldfish_pipe *pipe,
+ int cmd,
+ unsigned long xaddr,
+ unsigned long size)
+{
+ unsigned long irq_flags;
+ int status = 0;
+
+ spin_lock_irqsave(&dev->lock, irq_flags);
+ if (access_with_param(dev, cmd, xaddr, size, pipe, &status)) {
+ gf_write_ptr(pipe, dev->base + PIPE_V1_REG_CHANNEL,
+ dev->base + PIPE_V1_REG_CHANNEL_HIGH);
+
+ writel(size, dev->base + PIPE_V1_REG_SIZE);
+
+ gf_write_ptr((void *)xaddr,
+ dev->base + PIPE_V1_REG_ADDRESS,
+ dev->base + PIPE_V1_REG_ADDRESS_HIGH);
+
+ writel(cmd, dev->base + PIPE_V1_REG_COMMAND);
+
+ status = readl(dev->base + PIPE_V1_REG_STATUS);
+ }
+ spin_unlock_irqrestore(&dev->lock, irq_flags);
+
+ return status;
+}
+
+static unsigned long translate_address(const struct page *page,
+ unsigned long addr)
+{
+ return page_to_phys(page) | (addr & ~PAGE_MASK);
+}
+
+static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
+ size_t bufflen, int is_write)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+ unsigned long address;
+ unsigned long address_end;
+ const int wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+ const int pipe_cmd = is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ;
+ int count = 0;
+ int ret = -EINVAL;
+
+ /* If the emulator already closed the pipe, no need to go further */
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+
+ /* Null reads or writes succeed */
+ if (unlikely(bufflen == 0))
+ return 0;
+
+ /* Check the buffer range for access */
+ if (!access_ok(buffer, bufflen))
+ return -EFAULT;
+
+ address = (unsigned long)buffer;
+ address_end = address + bufflen;
+
+ /* Serialize access to the pipe */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ while (address < address_end) {
+ struct page *pages[MAX_PAGES_TO_GRAB];
+ unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ unsigned long avail;
+ unsigned long xaddr;
+ unsigned long xaddr_prev;
+ long first_page;
+ long last_page;
+ long requested_pages;
+ int status;
+ int n_pages;
+ int page_i;
+ int num_contiguous_pages;
+
+ /*
+ * Attempt to grab multiple physically contiguous pages.
+ */
+ first_page = address & PAGE_MASK;
+ last_page = (address_end - 1) & PAGE_MASK;
+ requested_pages =
+ min(((last_page - first_page) >> PAGE_SHIFT) + 1,
+ (long)MAX_PAGES_TO_GRAB);
+
+ ret = get_user_pages_fast(first_page, requested_pages,
+ !is_write, pages);
+ if (ret < 0) {
+ dev_err(dev->pdev_dev,
+ "%s: get_user_pages_fast failed: %d\n",
+ __func__, ret);
+ break;
+ } else if (!ret) {
+ dev_err(dev->pdev_dev,
+ "%s: error: no pages returned, requested %ld\n",
+ __func__, requested_pages);
+ break;
+ }
+
+ n_pages = ret;
+ xaddr = translate_address(pages[0], address);
+ xaddr_prev = xaddr;
+ num_contiguous_pages = 1;
+ for (page_i = 1; page_i < n_pages; page_i++) {
+ unsigned long xaddr_i;
+
+ xaddr_i = translate_address(pages[page_i], address);
+ if (xaddr_i == xaddr_prev + PAGE_SIZE) {
+ page_end += PAGE_SIZE;
+ xaddr_prev = xaddr_i;
+ num_contiguous_pages++;
+ } else {
+ dev_err(dev->pdev_dev,
+ "%s: discontinuous page boundary: %d "
+ "pages instead\n",
+ __func__, page_i);
+ break;
+ }
+ }
+ avail = min(page_end, address_end) - address;
+
+ status = transfer_pages(dev, pipe, pipe_cmd, xaddr, avail);
+
+ for (page_i = 0; page_i < n_pages; page_i++) {
+ if (status > 0 && !is_write &&
+ page_i < num_contiguous_pages)
+ set_page_dirty(pages[page_i]);
+
+ put_page(pages[page_i]);
+ }
+
+ if (status > 0) { /* Correct transfer */
+ count += status;
+ address += status;
+ continue;
+ } else if (status == 0) { /* EOF */
+ ret = 0;
+ break;
+ } else if (status < 0 && count > 0) {
+ /*
+ * An error occurred and we already transferred
+ * something on one of the previous pages.
+ * Just return what we already copied and log this
+ * err.
+ *
+ * Note: This seems like an incorrect approach but
+ * cannot change it until we check if any user space
+ * ABI relies on this behavior.
+ */
+ if (status != PIPE_ERROR_AGAIN)
+ dev_err_ratelimited(dev->pdev_dev,
+ "backend returned error %d on %s\n",
+ status, is_write ? "write" : "read");
+ ret = 0;
+ break;
+ }
+
+ /*
+		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
+		 * non-blocking mode, just return the error code.
+ */
+ if (status != PIPE_ERROR_AGAIN ||
+ (filp->f_flags & O_NONBLOCK) != 0) {
+ ret = goldfish_pipe_error_convert(status);
+ break;
+ }
+
+ /*
+ * The backend blocked the read/write, wait until the backend
+ * tells us it's ready to process more data.
+ */
+ set_bit(wake_bit, &pipe->flags);
+
+ /* Tell the emulator we're going to wait for a wake event */
+ goldfish_cmd(pipe, pipe_cmd);
+
+ /* Unlock the pipe, then wait for the wake signal */
+ mutex_unlock(&pipe->lock);
+
+ while (test_bit(wake_bit, &pipe->flags)) {
+ if (wait_event_interruptible(pipe->wake_queue,
+ !test_bit(wake_bit, &pipe->flags)))
+ return -ERESTARTSYS;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+ }
+
+ /* Try to re-acquire the lock */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+ }
+ mutex_unlock(&pipe->lock);
+
+ return (ret < 0) ? ret : count;
+}
+
+static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
+ size_t bufflen, loff_t *ppos)
+{
+ return goldfish_pipe_read_write(filp, buffer, bufflen,
+ /* is_write */ 0);
+}
+
+static ssize_t goldfish_pipe_write(struct file *filp,
+ const char __user *buffer, size_t bufflen,
+ loff_t *ppos)
+{
+ return goldfish_pipe_read_write(filp, (char __user *)buffer,
+ bufflen, /* is_write */ 1);
+}
+
+static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ unsigned int mask = 0;
+ int status;
+
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ poll_wait(filp, &pipe->wake_queue, wait);
+
+ status = goldfish_cmd_status(pipe, PIPE_CMD_POLL);
+
+ mutex_unlock(&pipe->lock);
+
+ if (status & PIPE_POLL_IN)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (status & PIPE_POLL_OUT)
+ mask |= POLLOUT | POLLWRNORM;
+
+ if (status & PIPE_POLL_HUP)
+ mask |= POLLHUP;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ mask |= POLLERR;
+
+ return mask;
+}
+
+static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+{
+ struct goldfish_pipe_dev *dev = dev_id;
+ unsigned long irq_flags;
+ int count = 0;
+
+ /*
+ * We're going to read from the emulator a list of (channel,flags)
+ * pairs corresponding to the wake events that occurred on each
+ * blocked pipe (i.e. channel).
+ */
+ spin_lock_irqsave(&dev->lock, irq_flags);
+ for (;;) {
+ /* First read the channel, 0 means the end of the list */
+ struct goldfish_pipe *pipe;
+ unsigned long wakes;
+ unsigned long channel = 0;
+
+#ifdef CONFIG_64BIT
+ channel =
+ (u64)readl(dev->base + PIPE_V1_REG_CHANNEL_HIGH) << 32;
+#endif
+ channel |= readl(dev->base + PIPE_V1_REG_CHANNEL);
+ if (!channel)
+ break;
+
+ /* Convert channel to struct pipe pointer + read wake flags */
+ wakes = readl(dev->base + PIPE_V1_REG_WAKES);
+ pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;
+
+		/* Did the emulator just close a pipe? */
+ if (wakes & PIPE_WAKE_CLOSED) {
+ set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
+ wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
+ }
+ if (wakes & PIPE_WAKE_READ)
+ clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
+ if (wakes & PIPE_WAKE_WRITE)
+ clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
+
+ wake_up_interruptible(&pipe->wake_queue);
+ count++;
+ }
+ spin_unlock_irqrestore(&dev->lock, irq_flags);
+
+ return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* A helper function to get the instance of goldfish_pipe_dev from file */
+static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
+}
+
+/**
+ * goldfish_pipe_open - open a channel to the AVD
+ * @inode: inode of device
+ * @file: file struct of opener
+ *
+ * Create a new pipe link between the emulator and the user application.
+ * Each new request produces a new pipe.
+ *
+ * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
+ * right now so this is fine. A move to 64bit will need this
+ * addressing scheme revisited.
+ */
+static int goldfish_pipe_open(struct inode *inode, struct file *file)
+{
+ struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
+ struct goldfish_pipe *pipe;
+ int status;
+
+ /* Allocate new pipe kernel object */
+ pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+ if (!pipe)
+ return -ENOMEM;
+
+ pipe->dev = dev;
+ init_waitqueue_head(&pipe->wake_queue);
+ mutex_init(&pipe->lock);
+
+ /*
+ * Now, tell the emulator we're opening a new pipe. We use the
+ * pipe object's address as the channel identifier for simplicity.
+ */
+
+ status = goldfish_cmd_status(pipe, PIPE_CMD_OPEN);
+ if (status < 0) {
+ kfree(pipe);
+ return status;
+ }
+
+ /* All is done, save the pipe into the file's private data field */
+ file->private_data = pipe;
+ return 0;
+}
+
+static int goldfish_pipe_release(struct inode *inode, struct file *filp)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+
+ pr_debug("%s: call. pipe=%p file=%p\n", __func__, pipe, filp);
+ /* The guest is closing the channel, so tell the emulator right now */
+ goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+ kfree(pipe);
+ filp->private_data = NULL;
+ return 0;
+}
+
+static const struct file_operations goldfish_pipe_fops = {
+ .owner = THIS_MODULE,
+ .read = goldfish_pipe_read,
+ .write = goldfish_pipe_write,
+ .poll = goldfish_pipe_poll,
+ .open = goldfish_pipe_open,
+ .release = goldfish_pipe_release,
+};
+
+static void init_miscdevice(struct miscdevice *miscdev)
+{
+ memset(miscdev, 0, sizeof(*miscdev));
+
+ miscdev->minor = MISC_DYNAMIC_MINOR;
+ miscdev->name = DEVICE_NAME;
+ miscdev->fops = &goldfish_pipe_fops;
+}
+
+static int goldfish_pipe_device_deinit(void *raw_dev,
+ struct platform_device *pdev);
+
+int goldfish_pipe_device_v1_init(struct platform_device *pdev,
+ void __iomem *base,
+ int irq)
+{
+ struct goldfish_pipe_dev *dev;
+ int err;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->super.deinit = &goldfish_pipe_device_deinit;
+ dev->pdev_dev = &pdev->dev;
+ spin_lock_init(&dev->lock);
+
+ err = devm_request_irq(&pdev->dev, irq,
+ &goldfish_pipe_interrupt, IRQF_SHARED,
+ DEVICE_NAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
+ return err;
+ }
+
+ init_miscdevice(&dev->miscdev);
+ err = misc_register(&dev->miscdev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register v1 device\n");
+ return err;
+ }
+
+ setup_access_params_addr(pdev, dev);
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+}
+
+static int goldfish_pipe_device_deinit(void *raw_dev,
+ struct platform_device *pdev)
+{
+ struct goldfish_pipe_dev *dev = raw_dev;
+
+ misc_deregister(&dev->miscdev);
+ return 0;
+}
diff --git a/goldfish_pipe_v2.c b/goldfish_pipe_v2.c
new file mode 100644
index 0000000..bf80291
--- /dev/null
+++ b/goldfish_pipe_v2.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
+ * Copyright (C) 2011-2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This source file contains the implementation of a special device driver
+ * that intends to provide a *very* fast communication channel between the
+ * guest system and the QEMU emulator.
+ *
+ * Usage from the guest is simply the following (error handling simplified):
+ *
+ * int fd = open("/dev/qemu_pipe",O_RDWR);
+ * .... write() or read() through the pipe.
+ *
+ * This driver doesn't deal with the exact protocol used during the session.
+ * It is intended to be as simple as something like:
+ *
+ * // do this _just_ after opening the fd to connect to a specific
+ * // emulator service.
+ * const char* msg = "<pipename>";
+ * if (write(fd, msg, strlen(msg)+1) < 0) {
+ * ... could not connect to <pipename> service
+ * close(fd);
+ * }
+ *
+ * // after this, simply read() and write() to communicate with the
+ * // service. Exact protocol details left as an exercise to the reader.
+ *
+ * This driver is very fast because it doesn't copy any data through
+ * intermediate buffers, since the emulator is capable of translating
+ * guest user addresses into host ones.
+ *
+ * Note that we must however ensure that each user page involved in the
+ * exchange is properly mapped during a transfer.
+ */
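+
+/* A minimal guest-side sketch of the above (the service name is purely
+ * illustrative and error handling is omitted):
+ *
+ *   int fd = open("/dev/qemu_pipe", O_RDWR | O_NONBLOCK);
+ *   write(fd, "pingService", strlen("pingService") + 1);
+ *
+ *   char buf[128];
+ *   ssize_t n = read(fd, buf, sizeof(buf));
+ *   if (n < 0 && errno == EAGAIN) {
+ *       // nothing to read yet: poll() the fd and retry, or drop
+ *       // O_NONBLOCK to sleep in the driver until the host wakes us
+ *   }
+ */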
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/bug.h>
+
+#include "uapi/goldfish_dma.h"
+#include "goldfish_pipe_qemu.h"
+#include "goldfish_pipe.h"
+
+/*
+ * Update this when something changes in the driver's behavior so the host
+ * can benefit from knowing it
+ * Notes:
+ * version 2 was an intermediate release and isn't supported anymore.
+ * version 3 is goldfish_pipe_v2 without DMA support.
+ * version 4 (current) is goldfish_pipe_v2 with DMA support.
+ */
+enum {
+ PIPE_DRIVER_VERSION = 4,
+ PIPE_CURRENT_DEVICE_VERSION = 2
+};
+
+enum {
+ MAX_BUFFERS_PER_COMMAND = 336,
+ MAX_SIGNALLED_PIPES = 64,
+ INITIAL_PIPES_CAPACITY = 64,
+ DMA_REGION_MIN_SIZE = PAGE_SIZE,
+ DMA_REGION_MAX_SIZE = 256 << 20
+};
+
+struct goldfish_pipe_dev;
+
+static int goldfish_pipe_device_deinit(void *raw_dev,
+ struct platform_device *pdev);
+
+/* A per-pipe command structure, shared with the host */
+struct goldfish_pipe_command {
+ s32 cmd; /* PipeCmdCode, guest -> host */
+ s32 id; /* pipe id, guest -> host */
+ s32 status; /* command execution status, host -> guest */
+ s32 reserved; /* to pad to 64-bit boundary */
+ union {
+ /* Parameters for PIPE_CMD_{READ,WRITE} */
+ struct {
+ /* number of buffers, guest -> host */
+ u32 buffers_count;
+ /* number of consumed bytes, host -> guest */
+ s32 consumed_size;
+ /* buffer pointers, guest -> host */
+ u64 ptrs[MAX_BUFFERS_PER_COMMAND];
+ /* buffer sizes, guest -> host */
+ u32 sizes[MAX_BUFFERS_PER_COMMAND];
+ } rw_params;
+ /* Parameters for PIPE_CMD_DMA_HOST_(UN)MAP */
+ struct {
+ u64 dma_paddr;
+ u64 sz;
+ } dma_maphost_params;
+ };
+};
+
+/* A single signalled pipe information */
+struct signalled_pipe_buffer {
+ u32 id;
+ u32 flags;
+};
+
+/* Parameters for the PIPE_CMD_OPEN command */
+struct open_command_param {
+ u64 command_buffer_ptr;
+ u32 rw_params_max_count;
+};
+
+/* Device-level set of buffers shared with the host */
+struct goldfish_pipe_dev_buffers {
+ struct open_command_param open_command_params;
+ struct signalled_pipe_buffer
+ signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
+};
+
+/*
+ * The main data structure tracking state is
+ * struct goldfish_dma_context, which is included
+ * as an extra pointer field in struct goldfish_pipe.
+ * Each such context is associated with at most
+ * one physical address and size describing the
+ * allocated DMA region, and only one allocation
+ * is allowed for each pipe fd. Further allocations
+ * require opening additional pipe fds.
+ */
+struct goldfish_dma_context {
+ struct device *pdev_dev; /* pointer to feed to dma_*_coherent */
+ void *dma_vaddr; /* kernel vaddr of dma region */
+ size_t dma_size; /* size of dma region */
+ dma_addr_t phys_begin; /* paddr of dma region */
+ dma_addr_t phys_end; /* paddr of dma region + dma_size */
+};
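+
+/* A rough sketch of the userspace flow this context supports, assuming
+ * |fd| is a pipe fd opened as in the header comment above and using the
+ * ioctls from uapi/goldfish_dma.h (the region size is illustrative):
+ *
+ *   struct goldfish_dma_ioctl_info info = { .size = 4096 };
+ *   ioctl(fd, GOLDFISH_DMA_IOC_CREATE_REGION, &info); // one region per fd
+ *   void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *                  MAP_SHARED, fd, 0);                // map that region
+ *   ioctl(fd, GOLDFISH_DMA_IOC_GETOFF, &info);        // fills phys_begin
+ */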
+
+/* This data type models a given pipe instance */
+struct goldfish_pipe {
+ /* pipe ID - index into goldfish_pipe_dev::pipes array */
+ u32 id;
+
+	/* The wake flags the pipe is waiting for.
+	 * Note: not protected by any lock; uses atomic operations
+	 * and barriers to make it thread-safe.
+ */
+ unsigned long flags;
+
+	/* Wake flags the host has signalled,
+	 * protected by goldfish_pipe_dev::lock
+ */
+ unsigned long signalled_flags;
+
+ /* A pointer to command buffer */
+ struct goldfish_pipe_command *command_buffer;
+
+ /* doubly linked list of signalled pipes, protected by
+ * goldfish_pipe_dev::lock
+ */
+ struct goldfish_pipe *prev_signalled;
+ struct goldfish_pipe *next_signalled;
+
+ /*
+ * A pipe's own lock. Protects the following:
+ * - *command_buffer - makes sure a command can safely write its
+ * parameters to the host and read the results back.
+ */
+ struct mutex lock;
+
+ /* A wake queue for sleeping until host signals an event */
+ wait_queue_head_t wake_queue;
+
+ /* Pointer to the parent goldfish_pipe_dev instance */
+ struct goldfish_pipe_dev *dev;
+
+ /* A buffer of pages, too large to fit into a stack frame */
+ struct page *pages[MAX_BUFFERS_PER_COMMAND];
+
+ /* Holds information about reserved DMA region for this pipe */
+ struct goldfish_dma_context *dma;
+};
+
+/* The global driver data. Holds a reference to the i/o page used to
+ * communicate with the emulator, and a wake queue for blocked tasks
+ * waiting to be awoken.
+ */
+struct goldfish_pipe_dev {
+ /* Needed for 'remove' */
+ struct goldfish_pipe_dev_base super;
+
+ /*
+ * Global device spinlock. Protects the following members:
+ * - pipes, pipes_capacity
+ * - [*pipes, *pipes + pipes_capacity) - array data
+ * - first_signalled_pipe,
+ * goldfish_pipe::prev_signalled,
+ * goldfish_pipe::next_signalled,
+	 * goldfish_pipe::signalled_flags - all signalled-related fields,
+ * in all allocated pipes
+ * - open_command_params - PIPE_CMD_OPEN-related buffers
+ *
+ * It looks like a lot of different fields, but the trick is that
+ * the only operation that happens often is the signalled pipes array
+ * manipulation. That's why it's OK for now to keep the rest of the
+ * fields under the same lock. If we notice too much contention because
+ * of PIPE_CMD_OPEN, then we should add a separate lock there.
+ */
+ spinlock_t lock;
+
+ /*
+ * Array of the pipes of |pipes_capacity| elements,
+ * indexed by goldfish_pipe::id
+ */
+ struct goldfish_pipe **pipes;
+ u32 pipes_capacity;
+
+	/* Pointers to the buffers the host uses to interact with this driver */
+ struct goldfish_pipe_dev_buffers *buffers;
+
+ /* Head of a doubly linked list of signalled pipes */
+ struct goldfish_pipe *first_signalled_pipe;
+
+ /* ptr to platform device's device struct */
+ struct device *pdev_dev;
+
+ /* Some device-specific data */
+ unsigned char __iomem *base;
+
+ /* an irq tasklet to run goldfish_interrupt_task */
+ struct tasklet_struct irq_tasklet;
+
+ struct miscdevice miscdev;
+
+ /* DMA info */
+ size_t dma_alloc_total;
+};
+
+static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
+ enum PipeCmdCode cmd)
+{
+ pipe->command_buffer->cmd = cmd;
+ /* failure by default */
+ pipe->command_buffer->status = PIPE_ERROR_INVAL;
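+	/* Writing the pipe id kicks the host, which runs the command
+	 * synchronously and writes the result back into *command_buffer.
+	 */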
+ writel(pipe->id, pipe->dev->base + PIPE_V2_REG_CMD);
+ return pipe->command_buffer->status;
+}
+
+static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+{
+ int status;
+
+ if (mutex_lock_interruptible(&pipe->lock))
+ return PIPE_ERROR_IO;
+ status = goldfish_pipe_cmd_locked(pipe, cmd);
+ mutex_unlock(&pipe->lock);
+ return status;
+}
+
+/*
+ * This function converts an error code returned by the emulator through
+ * the command buffer's status field into a valid negative errno value.
+ */
+static int goldfish_pipe_error_convert(int status)
+{
+ switch (status) {
+ case PIPE_ERROR_AGAIN:
+ return -EAGAIN;
+ case PIPE_ERROR_NOMEM:
+ return -ENOMEM;
+ case PIPE_ERROR_IO:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pin_user_pages(unsigned long first_page,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
+{
+ int ret;
+ int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
+
+ if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
+ requested_pages = MAX_BUFFERS_PER_COMMAND;
+ *iter_last_page_size = PAGE_SIZE;
+ } else {
+ *iter_last_page_size = last_page_size;
+ }
+
+ ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+ pages);
+ if (ret <= 0)
+ return -EFAULT;
+ if (ret < requested_pages)
+ *iter_last_page_size = PAGE_SIZE;
+
+ return ret;
+}
+
+static void release_user_pages(struct page **pages, int pages_count,
+ int is_write, s32 consumed_size)
+{
+ int i;
+
+ for (i = 0; i < pages_count; i++) {
+ if (!is_write && consumed_size > 0)
+ set_page_dirty(pages[i]);
+ put_page(pages[i]);
+ }
+}
+
+/* Populate the call parameters, merging adjacent pages together */
+static void populate_rw_params(struct page **pages,
+ int pages_count,
+ unsigned long address,
+ unsigned long address_end,
+ unsigned long first_page,
+ unsigned long last_page,
+ unsigned int iter_last_page_size,
+ int is_write,
+ struct goldfish_pipe_command *command)
+{
+ /*
+ * Process the first page separately - it's the only page that
+ * needs special handling for its start address.
+ */
+ unsigned long xaddr = page_to_phys(pages[0]);
+ unsigned long xaddr_prev = xaddr;
+ int buffer_idx = 0;
+ int i = 1;
+ int size_on_page = first_page == last_page
+ ? (int)(address_end - address)
+ : (PAGE_SIZE - (address & ~PAGE_MASK));
+ command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
+ command->rw_params.sizes[0] = size_on_page;
+ for (; i < pages_count; ++i) {
+ xaddr = page_to_phys(pages[i]);
+ size_on_page = (i == pages_count - 1) ?
+ iter_last_page_size : PAGE_SIZE;
+ if (xaddr == xaddr_prev + PAGE_SIZE) {
+ command->rw_params.sizes[buffer_idx] += size_on_page;
+ } else {
+ ++buffer_idx;
+ command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
+ command->rw_params.sizes[buffer_idx] = size_on_page;
+ }
+ xaddr_prev = xaddr;
+ }
+ command->rw_params.buffers_count = buffer_idx + 1;
+}
+
+static int transfer_max_buffers(struct goldfish_pipe *pipe,
+ unsigned long address,
+ unsigned long address_end,
+ int is_write,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ s32 *consumed_size,
+ int *status)
+{
+ unsigned long first_page = address & PAGE_MASK;
+ unsigned int iter_last_page_size;
+ int pages_count;
+
+ /* Serialize access to the pipe command buffers */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ pages_count = pin_user_pages(first_page, last_page,
+ last_page_size, is_write,
+ pipe->pages, &iter_last_page_size);
+ if (pages_count < 0) {
+ mutex_unlock(&pipe->lock);
+ return pages_count;
+ }
+
+ populate_rw_params(pipe->pages, pages_count, address, address_end,
+ first_page, last_page, iter_last_page_size, is_write,
+ pipe->command_buffer);
+
+ /* Transfer the data */
+ *status = goldfish_pipe_cmd_locked(pipe,
+ is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
+
+ *consumed_size = pipe->command_buffer->rw_params.consumed_size;
+
+ release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
+
+ mutex_unlock(&pipe->lock);
+ return 0;
+}
+
+static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
+{
+ u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+
+ set_bit(wake_bit, &pipe->flags);
+
+ /* Tell the emulator we're going to wait for a wake event */
+ goldfish_pipe_cmd(pipe,
+ is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
+
+ while (test_bit(wake_bit, &pipe->flags)) {
+ if (wait_event_interruptible(pipe->wake_queue,
+ !test_bit(wake_bit, &pipe->flags)))
+ return -ERESTARTSYS;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static ssize_t goldfish_pipe_read_write(struct file *filp,
+ char __user *buffer,
+ size_t bufflen,
+ int is_write)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ int count = 0, ret = -EINVAL;
+ unsigned long address, address_end, last_page;
+ unsigned int last_page_size;
+
+ /* If the emulator already closed the pipe, no need to go further */
+ if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
+ return -EIO;
+	/* Null reads or writes succeed */
+ if (unlikely(bufflen == 0))
+ return 0;
+ /* Check the buffer range for access */
+ if (unlikely(!access_ok(buffer, bufflen)))
+ return -EFAULT;
+
+ address = (unsigned long)buffer;
+ address_end = address + bufflen;
+ last_page = (address_end - 1) & PAGE_MASK;
+ last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
+
+ while (address < address_end) {
+ s32 consumed_size;
+ int status;
+
+ ret = transfer_max_buffers(pipe, address, address_end, is_write,
+ last_page, last_page_size,
+ &consumed_size, &status);
+ if (ret < 0)
+ break;
+
+ if (consumed_size > 0) {
+			/* No matter what the status is, we've transferred
+ * something.
+ */
+ count += consumed_size;
+ address += consumed_size;
+ }
+ if (status > 0)
+ continue;
+ if (status == 0) {
+ /* EOF */
+ ret = 0;
+ break;
+ }
+ if (count > 0) {
+ /*
+ * An error occurred, but we already transferred
+ * something on one of the previous iterations.
+			 * Just return what we already copied and log the
+			 * error.
+ */
+ if (status != PIPE_ERROR_AGAIN)
+ dev_err_ratelimited(pipe->dev->pdev_dev,
+ "backend error %d on %s\n",
+ status, is_write ? "write" : "read");
+ break;
+ }
+
+ /*
+ * If the error is not PIPE_ERROR_AGAIN, or if we are in
+ * non-blocking mode, just return the error code.
+ */
+ if (status != PIPE_ERROR_AGAIN ||
+ (filp->f_flags & O_NONBLOCK) != 0) {
+ ret = goldfish_pipe_error_convert(status);
+ break;
+ }
+
+ status = wait_for_host_signal(pipe, is_write);
+ if (status < 0)
+ return status;
+ }
+
+ if (count > 0)
+ return count;
+ return ret;
+}
+
+static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
+ size_t bufflen, loff_t *ppos)
+{
+ return goldfish_pipe_read_write(filp, buffer, bufflen,
+ /* is_write */ 0);
+}
+
+static ssize_t goldfish_pipe_write(struct file *filp,
+ const char __user *buffer, size_t bufflen,
+ loff_t *ppos)
+{
+ /* cast away the const */
+ char __user *no_const_buffer = (char __user *)buffer;
+
+ return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
+ /* is_write */ 1);
+}
+
+static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+{
+ struct goldfish_pipe *pipe = filp->private_data;
+ unsigned int mask = 0;
+ int status;
+
+ poll_wait(filp, &pipe->wake_queue, wait);
+
+ status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
+ if (status < 0)
+ return -ERESTARTSYS;
+
+ if (status & PIPE_POLL_IN)
+ mask |= POLLIN | POLLRDNORM;
+ if (status & PIPE_POLL_OUT)
+ mask |= POLLOUT | POLLWRNORM;
+ if (status & PIPE_POLL_HUP)
+ mask |= POLLHUP;
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ mask |= POLLERR;
+
+ return mask;
+}
+
+static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
+ u32 id, u32 flags)
+{
+ struct goldfish_pipe *pipe;
+
+ if (WARN_ON(id >= dev->pipes_capacity))
+ return;
+
+ pipe = dev->pipes[id];
+ if (!pipe)
+ return;
+ pipe->signalled_flags |= flags;
+
+ if (pipe->prev_signalled || pipe->next_signalled ||
+ dev->first_signalled_pipe == pipe)
+ return; /* already in the list */
+ pipe->next_signalled = dev->first_signalled_pipe;
+ if (dev->first_signalled_pipe)
+ dev->first_signalled_pipe->prev_signalled = pipe;
+ dev->first_signalled_pipe = pipe;
+}
+
+static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
+ struct goldfish_pipe *pipe)
+{
+ if (pipe->prev_signalled)
+ pipe->prev_signalled->next_signalled = pipe->next_signalled;
+ if (pipe->next_signalled)
+ pipe->next_signalled->prev_signalled = pipe->prev_signalled;
+ if (pipe == dev->first_signalled_pipe)
+ dev->first_signalled_pipe = pipe->next_signalled;
+ pipe->prev_signalled = NULL;
+ pipe->next_signalled = NULL;
+}
+
+static struct goldfish_pipe *signalled_pipes_pop_front(
+ struct goldfish_pipe_dev *dev, int *wakes)
+{
+ struct goldfish_pipe *pipe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ pipe = dev->first_signalled_pipe;
+ if (pipe) {
+ *wakes = pipe->signalled_flags;
+ pipe->signalled_flags = 0;
+ /*
+ * This is an optimized version of
+ * signalled_pipes_remove_locked()
+		 * - we want it to be as fast as possible so the
+		 * sleeping pipe operations are woken sooner.
+ */
+ dev->first_signalled_pipe = pipe->next_signalled;
+ if (dev->first_signalled_pipe)
+ dev->first_signalled_pipe->prev_signalled = NULL;
+ pipe->next_signalled = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return pipe;
+}
+
+static void goldfish_interrupt_task(unsigned long dev_addr)
+{
+ /* Iterate over the signalled pipes and wake them one by one */
+ struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
+ struct goldfish_pipe *pipe;
+ int wakes;
+
+ while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
+ if (wakes & PIPE_WAKE_CLOSED) {
+ pipe->flags = 1 << BIT_CLOSED_ON_HOST;
+ } else {
+ if (wakes & PIPE_WAKE_READ)
+ clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
+ if (wakes & PIPE_WAKE_WRITE)
+ clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
+ }
+ /*
+ * wake_up_interruptible() implies a write barrier, so don't
+ * explicitly add another one here.
+ */
+ wake_up_interruptible(&pipe->wake_queue);
+ }
+}
+
+/*
+ * The general idea of the interrupt handling:
+ *
+ * 1. device raises an interrupt if there's at least one signalled pipe
+ * 2. IRQ handler reads the signalled pipes and their count from the device
+ * 3. device writes them into a shared buffer and returns the count
+ * it only resets the IRQ if it has returned all signalled pipes,
+ * otherwise it leaves it raised, so IRQ handler will be called
+ * again for the next chunk
+ * 4. IRQ handler adds all returned pipes to the device's signalled pipes list
+ * 5. IRQ handler launches a tasklet to process the signalled pipes from the
+ * list in a separate context
+ */
+static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+{
+ u32 count;
+ u32 i;
+ unsigned long flags;
+ struct goldfish_pipe_dev *dev = dev_id;
+
+ if (dev->super.deinit != &goldfish_pipe_device_deinit)
+ return IRQ_NONE;
+
+ /* Request the signalled pipes from the device */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ count = readl(dev->base + PIPE_V2_REG_GET_SIGNALLED);
+ if (count == 0) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_NONE;
+ }
+ if (count > MAX_SIGNALLED_PIPES)
+ count = MAX_SIGNALLED_PIPES;
+
+ for (i = 0; i < count; ++i)
+ signalled_pipes_add_locked(dev,
+ dev->buffers->signalled_pipe_buffers[i].id,
+ dev->buffers->signalled_pipe_buffers[i].flags);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ tasklet_schedule(&dev->irq_tasklet);
+ return IRQ_HANDLED;
+}
+
+static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
+{
+ int id;
+
+ for (id = 0; id < dev->pipes_capacity; ++id)
+ if (!dev->pipes[id])
+ return id;
+
+ {
+ /* Reallocate the array.
+ * Since get_free_pipe_id_locked runs with interrupts disabled,
+ * we don't want to make calls that could lead to sleep.
+ */
+ u32 new_capacity = 2 * dev->pipes_capacity;
+ struct goldfish_pipe **pipes =
+ kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
+ if (!pipes)
+ return -ENOMEM;
+ memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
+ kfree(dev->pipes);
+ dev->pipes = pipes;
+ id = dev->pipes_capacity;
+ dev->pipes_capacity = new_capacity;
+ }
+ return id;
+}
+
+/* A helper function to get the instance of goldfish_pipe_dev from file */
+static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
+}
+
+/**
+ * goldfish_pipe_open - open a channel to the AVD
+ * @inode: inode of device
+ * @file: file struct of opener
+ *
+ * Create a new pipe link between the emulator and the user application.
+ * Each new request produces a new pipe.
+ *
+ * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
+ * right now so this is fine. A move to 64bit will need this
+ * addressing scheme revisited.
+ */
+static int goldfish_pipe_open(struct inode *inode, struct file *file)
+{
+ struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
+ unsigned long flags;
+ int id;
+ int status;
+
+ /* Allocate new pipe kernel object */
+ struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+
+ if (!pipe)
+ return -ENOMEM;
+
+ pipe->dev = dev;
+ mutex_init(&pipe->lock);
+ init_waitqueue_head(&pipe->wake_queue);
+
+ /*
+	 * The command buffer needs to be allocated on its own page to make
+	 * sure it is physically contiguous in the host's address space.
+ */
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
+ pipe->command_buffer =
+ (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
+ if (!pipe->command_buffer) {
+ status = -ENOMEM;
+ goto err_pipe;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ id = get_free_pipe_id_locked(dev);
+ if (id < 0) {
+ status = id;
+ goto err_id_locked;
+ }
+
+ dev->pipes[id] = pipe;
+ pipe->id = id;
+ pipe->command_buffer->id = id;
+
+ /* Now tell the emulator we're opening a new pipe. */
+ dev->buffers->open_command_params.rw_params_max_count =
+ MAX_BUFFERS_PER_COMMAND;
+ dev->buffers->open_command_params.command_buffer_ptr =
+ (u64)(unsigned long)__pa(pipe->command_buffer);
+ status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (status < 0)
+ goto err_cmd;
+ pipe->dma = NULL;
+
+ /* All is done, save the pipe into the file's private data field */
+ file->private_data = pipe;
+ return 0;
+
+err_cmd:
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pipes[id] = NULL;
+err_id_locked:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ free_page((unsigned long)pipe->command_buffer);
+err_pipe:
+ kfree(pipe);
+ return status;
+}
+
+static void goldfish_pipe_dma_release_host(struct goldfish_pipe *pipe)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+ struct device *pdev_dev;
+
+ if (!dma)
+ return;
+
+ pdev_dev = pipe->dev->pdev_dev;
+
+ if (dma->dma_vaddr) {
+ pipe->command_buffer->dma_maphost_params.dma_paddr =
+ dma->phys_begin;
+ pipe->command_buffer->dma_maphost_params.sz = dma->dma_size;
+ goldfish_pipe_cmd(pipe, PIPE_CMD_DMA_HOST_UNMAP);
+ }
+}
+
+static void goldfish_pipe_dma_release_guest(struct goldfish_pipe *pipe)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+
+ if (!dma)
+ return;
+
+ if (dma->dma_vaddr) {
+ dma_free_coherent(dma->pdev_dev,
+ dma->dma_size,
+ dma->dma_vaddr,
+ dma->phys_begin);
+ pipe->dev->dma_alloc_total -= dma->dma_size;
+ }
+}
+
+static int goldfish_pipe_release(struct inode *inode, struct file *filp)
+{
+ unsigned long flags;
+ struct goldfish_pipe *pipe = filp->private_data;
+ struct goldfish_pipe_dev *dev = pipe->dev;
+
+ /* The guest is closing the channel, so tell the emulator right now */
+ goldfish_pipe_dma_release_host(pipe);
+ goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pipes[pipe->id] = NULL;
+ signalled_pipes_remove_locked(dev, pipe);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ filp->private_data = NULL;
+
+	/* Even if an fd is duped or inherited across a fork(),
+	 * the open/release methods are called only once, ever.
+ * This makes goldfish_pipe_release a safe point
+ * to delete the DMA region.
+ */
+ goldfish_pipe_dma_release_guest(pipe);
+
+ kfree(pipe->dma);
+ free_page((unsigned long)pipe->command_buffer);
+ kfree(pipe);
+
+ return 0;
+}
+
+/* VMA open/close are for debugging purposes only.
+ * One might think that fork() (and the vma open/close calls it
+ * triggers) would require some sort of bookkeeping or refcounting
+ * for dma contexts (incl. when to call dma_free_coherent),
+ * but the |vm_private_data| field and |vma_open/close| are only
+ * for situations where the driver needs to interact with vma's
+ * directly, with its own per-VMA data structure (which does
+ * need to be refcounted).
+ *
+ * Here, we just use the kernel's existing
+ * VMA processing; we don't do anything on our own.
+ * The only reason we would want to do so is if we had to do
+ * special processing for the virtual (not physical) memory
+ * already associated with DMA memory; it is much less related
+ * to the task of knowing when to alloc/dealloc DMA memory.
+ */
+static void goldfish_dma_vma_open(struct vm_area_struct *vma)
+{
+ /* Not used */
+}
+
+static void goldfish_dma_vma_close(struct vm_area_struct *vma)
+{
+ /* Not used */
+}
+
+static const struct vm_operations_struct goldfish_dma_vm_ops = {
+ .open = goldfish_dma_vma_open,
+ .close = goldfish_dma_vma_close,
+};
+
+static bool is_page_size_multiple(unsigned long sz)
+{
+ return !(sz & (PAGE_SIZE - 1));
+}
+
+static bool check_region_size_valid(size_t size)
+{
+ if (size < DMA_REGION_MIN_SIZE)
+ return false;
+
+ if (size > DMA_REGION_MAX_SIZE)
+ return false;
+
+ return is_page_size_multiple(size);
+}
+
+static int goldfish_pipe_dma_alloc_locked(struct goldfish_pipe *pipe)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+
+ if (dma->dma_vaddr)
+ return 0;
+
+ dma->phys_begin = 0;
+ dma->dma_vaddr = dma_alloc_coherent(dma->pdev_dev,
+ dma->dma_size,
+ &dma->phys_begin,
+ GFP_KERNEL);
+ if (!dma->dma_vaddr)
+ return -ENOMEM;
+
+ dma->phys_end = dma->phys_begin + dma->dma_size;
+ pipe->dev->dma_alloc_total += dma->dma_size;
+ pipe->command_buffer->dma_maphost_params.dma_paddr = dma->phys_begin;
+ pipe->command_buffer->dma_maphost_params.sz = dma->dma_size;
+
+ goldfish_pipe_cmd_locked(pipe, PIPE_CMD_DMA_HOST_MAP);
+ /* A workaround for b/110152998 */
+ return 0;
+}
+
+static int goldfish_dma_mmap_locked(struct goldfish_pipe *pipe,
+ struct vm_area_struct *vma)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+ size_t sz_requested = vma->vm_end - vma->vm_start;
+ int status;
+
+ if (!check_region_size_valid(sz_requested)) {
+ dev_err(pdev_dev, "%s: bad size (%zu) requested\n", __func__,
+ sz_requested);
+ return -EINVAL;
+ }
+
+ /* Alloc phys region if not allocated already. */
+ status = goldfish_pipe_dma_alloc_locked(pipe);
+ if (status)
+ return status;
+
+ status = remap_pfn_range(vma,
+ vma->vm_start,
+ dma->phys_begin >> PAGE_SHIFT,
+ sz_requested,
+ vma->vm_page_prot);
+ if (status < 0) {
+ dev_err(pdev_dev, "Cannot remap pfn range....\n");
+ return -EAGAIN;
+ }
+ vma->vm_ops = &goldfish_dma_vm_ops;
+ return 0;
+}
+
+/* When we call mmap() on a pipe fd, we obtain a pointer into
+ * the physically contiguous DMA region of the pipe device
+ * (Goldfish DMA).
+ */
+static int goldfish_dma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct goldfish_pipe *pipe =
+ (struct goldfish_pipe *)(filp->private_data);
+ int status;
+
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ status = goldfish_dma_mmap_locked(pipe, vma);
+ mutex_unlock(&pipe->lock);
+ return status;
+}
+
+static int goldfish_pipe_dma_create_region(struct goldfish_pipe *pipe,
+ size_t size)
+{
+ struct goldfish_dma_context *dma =
+ kzalloc(sizeof(struct goldfish_dma_context), GFP_KERNEL);
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+
+ if (dma) {
+ if (mutex_lock_interruptible(&pipe->lock)) {
+ kfree(dma);
+ return -ERESTARTSYS;
+ }
+
+ if (pipe->dma) {
+ mutex_unlock(&pipe->lock);
+ kfree(dma);
+			dev_err(pdev_dev, "The DMA region is already allocated\n");
+ return -EBUSY;
+ }
+
+ dma->dma_size = size;
+ dma->pdev_dev = pipe->dev->pdev_dev;
+ pipe->dma = dma;
+ mutex_unlock(&pipe->lock);
+ return 0;
+ }
+
+ dev_err(pdev_dev, "Could not allocate DMA context info!\n");
+ return -ENOMEM;
+}
+
+static long goldfish_dma_ioctl_getoff(struct goldfish_pipe *pipe,
+ unsigned long arg)
+{
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+ struct goldfish_dma_ioctl_info ioctl_data;
+ struct goldfish_dma_context *dma;
+
+ BUILD_BUG_ON(FIELD_SIZEOF(struct goldfish_dma_ioctl_info, phys_begin) <
+ FIELD_SIZEOF(struct goldfish_dma_context, phys_begin));
+
+ if (mutex_lock_interruptible(&pipe->lock)) {
+ dev_err(pdev_dev, "DMA_GETOFF: the pipe is not locked\n");
+ return -EACCES;
+ }
+
+ dma = pipe->dma;
+ if (dma) {
+ ioctl_data.phys_begin = dma->phys_begin;
+ ioctl_data.size = dma->dma_size;
+ } else {
+ ioctl_data.phys_begin = 0;
+ ioctl_data.size = 0;
+ }
+
+ if (copy_to_user((void __user *)arg, &ioctl_data,
+ sizeof(ioctl_data))) {
+ mutex_unlock(&pipe->lock);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&pipe->lock);
+ return 0;
+}
+
+static long goldfish_dma_ioctl_create_region(struct goldfish_pipe *pipe,
+ unsigned long arg)
+{
+ struct goldfish_dma_ioctl_info ioctl_data;
+
+ if (copy_from_user(&ioctl_data, (void __user *)arg, sizeof(ioctl_data)))
+ return -EFAULT;
+
+ if (!check_region_size_valid(ioctl_data.size)) {
+ dev_err(pipe->dev->pdev_dev,
+ "DMA_CREATE_REGION: bad size (%lld) requested\n",
+ ioctl_data.size);
+ return -EINVAL;
+ }
+
+ return goldfish_pipe_dma_create_region(pipe, ioctl_data.size);
+}
+
+static long goldfish_dma_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct goldfish_pipe *pipe =
+ (struct goldfish_pipe *)(file->private_data);
+
+ switch (cmd) {
+ case GOLDFISH_DMA_IOC_LOCK:
+ return 0;
+ case GOLDFISH_DMA_IOC_UNLOCK:
+ wake_up_interruptible(&pipe->wake_queue);
+ return 0;
+ case GOLDFISH_DMA_IOC_GETOFF:
+ return goldfish_dma_ioctl_getoff(pipe, arg);
+ case GOLDFISH_DMA_IOC_CREATE_REGION:
+ return goldfish_dma_ioctl_create_region(pipe, arg);
+ }
+ return -ENOTTY;
+}
+
+static const struct file_operations goldfish_pipe_fops = {
+ .owner = THIS_MODULE,
+ .read = goldfish_pipe_read,
+ .write = goldfish_pipe_write,
+ .poll = goldfish_pipe_poll,
+ .open = goldfish_pipe_open,
+ .release = goldfish_pipe_release,
+ /* DMA-related operations */
+ .mmap = goldfish_dma_mmap,
+ .unlocked_ioctl = goldfish_dma_ioctl,
+ .compat_ioctl = goldfish_dma_ioctl,
+};
+
+static void init_miscdevice(struct miscdevice *miscdev)
+{
+ memset(miscdev, 0, sizeof(*miscdev));
+
+ miscdev->minor = MISC_DYNAMIC_MINOR;
+ miscdev->name = DEVICE_NAME;
+ miscdev->fops = &goldfish_pipe_fops;
+}
+
+static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
+{
+ const unsigned long paddr = __pa(addr);
+
+ writel(upper_32_bits(paddr), porth);
+ writel(lower_32_bits(paddr), portl);
+}
+
+int goldfish_pipe_device_v2_init(struct platform_device *pdev,
+ char __iomem *base,
+ int irq)
+{
+ struct goldfish_pipe_dev *dev;
+ int err;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->super.deinit = &goldfish_pipe_device_deinit;
+ spin_lock_init(&dev->lock);
+
+ tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
+ (unsigned long)dev);
+
+ err = devm_request_irq(&pdev->dev, irq,
+ goldfish_pipe_interrupt,
+ IRQF_SHARED, DEVICE_NAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
+ return err;
+ }
+
+ init_miscdevice(&dev->miscdev);
+ err = misc_register(&dev->miscdev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register v2 device\n");
+ return err;
+ }
+
+ dev->base = base;
+ dev->pdev_dev = &pdev->dev;
+ dev->first_signalled_pipe = NULL;
+ dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
+ dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
+ GFP_KERNEL);
+ if (!dev->pipes) {
+ misc_deregister(&dev->miscdev);
+ return -ENOMEM;
+ }
+
+ /*
+ * We're going to pass two buffers, open_command_params and
+ * signalled_pipe_buffers, to the host. This means each of those buffers
+ * needs to be contained in a single physical page. The easiest choice
+ * is to just allocate a page and place the buffers in it.
+ */
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)
+ __get_free_page(GFP_KERNEL);
+ if (!dev->buffers) {
+ kfree(dev->pipes);
+ misc_deregister(&dev->miscdev);
+ return -ENOMEM;
+ }
+
+ /* Send the buffer addresses to the host */
+ write_pa_addr(&dev->buffers->signalled_pipe_buffers,
+ dev->base + PIPE_V2_REG_SIGNAL_BUFFER,
+ dev->base + PIPE_V2_REG_SIGNAL_BUFFER_HIGH);
+
+ writel(MAX_SIGNALLED_PIPES,
+ dev->base + PIPE_V2_REG_SIGNAL_BUFFER_COUNT);
+
+ write_pa_addr(&dev->buffers->open_command_params,
+ dev->base + PIPE_V2_REG_OPEN_BUFFER,
+ dev->base + PIPE_V2_REG_OPEN_BUFFER_HIGH);
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+}
+
+static int goldfish_pipe_device_deinit(void *raw_dev,
+ struct platform_device *pdev)
+{
+ struct goldfish_pipe_dev *dev = raw_dev;
+
+ misc_deregister(&dev->miscdev);
+ tasklet_kill(&dev->irq_tasklet);
+ kfree(dev->pipes);
+ free_page((unsigned long)dev->buffers);
+
+ return 0;
+}
diff --git a/goldfish_rotary.c b/goldfish_rotary.c
new file mode 100644
index 0000000..91600a0
--- /dev/null
+++ b/goldfish_rotary.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+
+enum {
+ REG_READ = 0x00,
+ REG_SET_PAGE = 0x00,
+ REG_LEN = 0x04,
+ REG_DATA = 0x08,
+
+ PAGE_NAME = 0x00000,
+ PAGE_EVBITS = 0x10000,
+ PAGE_ABSDATA = 0x20000 | EV_ABS,
+
+ IRQ_MAGIC = 987642334
+};
+
+struct event_dev {
+ int magic;
+ int irq;
+ struct input_dev *input;
+ void __iomem *addr;
+ char name[0];
+};
+
+static irqreturn_t rotary_interrupt_impl(struct event_dev *edev)
+{
+ unsigned int type, code, value;
+
+ type = readl(edev->addr + REG_READ);
+ code = readl(edev->addr + REG_READ);
+ value = readl(edev->addr + REG_READ);
+
+ input_event(edev->input, type, code, value);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rotary_interrupt(int irq, void *dev_id)
+{
+ struct event_dev *edev = dev_id;
+
+ if (edev->magic != IRQ_MAGIC)
+ return IRQ_NONE;
+
+ return rotary_interrupt_impl(edev);
+}
+
+static void rotary_import_bits(struct event_dev *edev,
+ unsigned long bits[],
+ unsigned int type, size_t count)
+{
+ void __iomem *addr = edev->addr;
+ int i, j;
+ size_t size;
+ uint8_t val;
+
+ writel(PAGE_EVBITS | type, addr + REG_SET_PAGE);
+
+ size = readl(addr + REG_LEN) * CHAR_BIT;
+ if (size < count)
+ count = size;
+
+ addr += REG_DATA;
+ for (i = 0; i < count; i += CHAR_BIT) {
+ val = readb(addr++);
+ for (j = 0; j < CHAR_BIT; j++)
+ if (val & 1 << j)
+ set_bit(i + j, bits);
+ }
+}
+
+static void rotary_import_abs_params(struct event_dev *edev)
+{
+ struct input_dev *input_dev = edev->input;
+ void __iomem *addr = edev->addr;
+ u32 val[4];
+ int count;
+ int i, j;
+
+ writel(PAGE_ABSDATA, addr + REG_SET_PAGE);
+
+ count = readl(addr + REG_LEN) / sizeof(val);
+ if (count > ABS_MAX)
+ count = ABS_MAX;
+
+ for (i = 0; i < count; i++) {
+ if (!test_bit(i, input_dev->absbit))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(val); j++) {
+ int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+ val[j] = readl(edev->addr + REG_DATA + offset);
+ }
+
+ input_set_abs_params(input_dev, i,
+ val[0], val[1], val[2], val[3]);
+ }
+}
+
+static int rotary_probe(struct platform_device *pdev)
+{
+ struct input_dev *input_dev;
+ struct event_dev *edev;
+ struct resource *res;
+ unsigned int keymapnamelen;
+ void __iomem *addr;
+ int irq;
+ int i;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ addr = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
+ if (!addr)
+ return -ENOMEM;
+
+ writel(PAGE_NAME, addr + REG_SET_PAGE);
+ keymapnamelen = readl(addr + REG_LEN);
+
+ edev = devm_kzalloc(&pdev->dev,
+ sizeof(struct event_dev) + keymapnamelen + 1,
+ GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ edev->magic = IRQ_MAGIC;
+ edev->input = input_dev;
+ edev->addr = addr;
+ edev->irq = irq;
+
+ for (i = 0; i < keymapnamelen; i++)
+ edev->name[i] = readb(edev->addr + REG_DATA + i);
+
+ pr_debug("%s: keymap=%s\n", __func__, edev->name);
+
+ input_dev->name = edev->name;
+ input_dev->id.bustype = BUS_HOST;
+ rotary_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
+ rotary_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX);
+ rotary_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX);
+
+ rotary_import_abs_params(edev);
+
+ error = devm_request_irq(&pdev->dev, edev->irq, rotary_interrupt,
+ IRQF_SHARED, "goldfish-rotary", edev);
+ if (error)
+ return error;
+
+ error = input_register_device(input_dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id goldfish_rotary_of_match[] = {
+ { .compatible = "generic,goldfish-rotary", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_rotary_of_match);
+
+static const struct acpi_device_id goldfish_rotary_acpi_match[] = {
+ { "GFSH0008", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_rotary_acpi_match);
+
+static struct platform_driver rotary_driver = {
+ .probe = rotary_probe,
+ .driver = {
+ .name = "goldfish_rotary",
+ .of_match_table = goldfish_rotary_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_rotary_acpi_match),
+ },
+};
+
+module_platform_driver(rotary_driver);
+
+MODULE_AUTHOR("Nimrod Gileadi");
+MODULE_DESCRIPTION("Goldfish Rotary Encoder Device");
+MODULE_LICENSE("GPL v2");
diff --git a/goldfish_sync.c b/goldfish_sync.c
new file mode 100644
index 0000000..95ec41d
--- /dev/null
+++ b/goldfish_sync.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/* The Goldfish sync driver is designed to provide an interface
+ * between the underlying host's sync device and the kernel's
+ * fence sync framework.
+ *
+ * The purpose of the device/driver is to enable lightweight creation and
+ * signaling of timelines and fences in order to synchronize the guest with
+ * host-side graphics events.
+ *
+ * Each time the interrupt trips, the driver may perform a sync operation.
+ */
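+
+/* As a rough guest-side sketch: a fence fd produced through this device
+ * (via the ioctl interface declared in uapi/goldfish_sync.h) is a regular
+ * sync_file, so userspace can wait for the host to signal it with poll();
+ * |fence_fd| and the timeout below are placeholders for illustration:
+ *
+ *   struct pollfd p = { .fd = fence_fd, .events = POLLIN };
+ *   poll(&p, 1, 3000); // POLLIN is set once the fence is signalled
+ */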
+
+#include <linux/acpi.h>
+#include <linux/dma-fence.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sync_file.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "uapi/goldfish_sync.h"
+
+struct sync_pt {
+ struct dma_fence base; /* must be the first field in this struct */
+ struct list_head active_list; /* see active_list_head below */
+};
+
+struct goldfish_sync_state;
+
+struct goldfish_sync_timeline {
+ struct goldfish_sync_state *sync_state;
+
+	/* This object is owned by userspace via open() and is also
+	 * referenced by each sync_pt.
+ */
+ struct kref kref;
+ char name[32]; /* for debugging */
+
+ u64 context;
+ unsigned int seqno;
+ /* list of active (unsignaled/errored) sync_pts */
+ struct list_head active_list_head;
+ spinlock_t lock; /* protects the fields above */
+};
+
+/* These definitions (command codes, register layout, ioctl definitions)
+ * need to be in sync with the following files:
+ *
+ * Host-side (emulator):
+ * external/qemu/android/emulation/goldfish_sync.h
+ * external/qemu-android/hw/misc/goldfish_sync.c
+ *
+ * Guest-side (system image):
+ * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
+ * device/generic/goldfish/ueventd.ranchu.rc
+ * platform/build/target/board/generic/sepolicy/file_contexts
+ */
+struct goldfish_sync_hostcmd {
+ /* sorted for alignment */
+ u64 handle;
+ u64 hostcmd_handle;
+ u32 cmd;
+ u32 time_arg;
+};
+
+struct goldfish_sync_guestcmd {
+ u64 host_command; /* u64 for alignment */
+ u64 glsync_handle;
+ u64 thread_handle;
+ u64 guest_timeline_handle;
+};
+
+/* The host operations are: */
+enum cmd_id {
+ /* Ready signal - used to mark when irq should lower */
+ CMD_SYNC_READY = 0,
+
+ /* Create a new timeline. writes timeline handle */
+ CMD_CREATE_SYNC_TIMELINE = 1,
+
+ /* Create a fence object. reads timeline handle and time argument.
+ * Writes fence fd to the SYNC_REG_HANDLE register.
+ */
+ CMD_CREATE_SYNC_FENCE = 2,
+
+ /* Increments timeline. reads timeline handle and time argument */
+ CMD_SYNC_TIMELINE_INC = 3,
+
+ /* Destroys a timeline. reads timeline handle */
+ CMD_DESTROY_SYNC_TIMELINE = 4,
+
+ /* Starts a wait on the host with the given glsync object and
+ * sync thread handle.
+ */
+ CMD_TRIGGER_HOST_WAIT = 5,
+};
+
+/* The host register layout is: */
+enum sync_reg_id {
+ /* host->guest batch commands */
+ SYNC_REG_BATCH_COMMAND = 0x00,
+
+ /* guest->host batch commands */
+ SYNC_REG_BATCH_GUESTCOMMAND = 0x04,
+
+ /* communicate physical address of host->guest batch commands */
+ SYNC_REG_BATCH_COMMAND_ADDR = 0x08,
+ SYNC_REG_BATCH_COMMAND_ADDR_HIGH = 0x0C, /* 64-bit part */
+
+ /* communicate physical address of guest->host commands */
+ SYNC_REG_BATCH_GUESTCOMMAND_ADDR = 0x10,
+ SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH = 0x14, /* 64-bit part */
+
+ /* signals that the device has been probed */
+ SYNC_REG_INIT = 0x18,
+};
+
+#define GOLDFISH_SYNC_MAX_CMDS 32
+
+/* The driver state: */
+struct goldfish_sync_state {
+ struct miscdevice miscdev;
+
+ char __iomem *reg_base;
+ int irq;
+
+ /* Used to generate unique names, see goldfish_sync_timeline::name. */
+ u64 id_counter;
+
+ /* |mutex_lock| protects all concurrent access
+ * to timelines for both kernel and user space.
+ */
+ struct mutex mutex_lock;
+
+ /* Buffer holding commands issued from host. */
+ struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
+ u32 to_do_end;
+ /* Protects to_do and to_do_end */
+ spinlock_t to_do_lock;
+
+ /* Buffers for the reading or writing
+ * of individual commands. The host can directly write
+ * to |batch_hostcmd| (and then this driver immediately
+ * copies contents to |to_do|). This driver either replies
+ * through |batch_hostcmd| or simply issues a
+ * guest->host command through |batch_guestcmd|.
+ */
+ struct goldfish_sync_hostcmd batch_hostcmd;
+ struct goldfish_sync_guestcmd batch_guestcmd;
+
+ /* Used to give this struct itself to a work queue
+ * function for executing actual sync commands.
+ */
+ struct work_struct work_item;
+};
+
+static struct goldfish_sync_timeline
+*goldfish_dma_fence_parent(struct dma_fence *fence)
+{
+ return container_of(fence->lock, struct goldfish_sync_timeline, lock);
+}
+
+static struct sync_pt *goldfish_sync_fence_to_sync_pt(struct dma_fence *fence)
+{
+ return container_of(fence, struct sync_pt, base);
+}
+
+/* sync_state->mutex_lock must be locked. */
+struct goldfish_sync_timeline __must_check
+*goldfish_sync_timeline_create(struct goldfish_sync_state *sync_state)
+{
+ struct goldfish_sync_timeline *tl;
+
+ tl = kzalloc(sizeof(*tl), GFP_KERNEL);
+ if (!tl)
+ return NULL;
+
+ tl->sync_state = sync_state;
+ kref_init(&tl->kref);
+ snprintf(tl->name, sizeof(tl->name),
+ "%s:%llu", GOLDFISH_SYNC_DEVICE_NAME,
+ ++sync_state->id_counter);
+ tl->context = dma_fence_context_alloc(1);
+ tl->seqno = 0;
+ INIT_LIST_HEAD(&tl->active_list_head);
+ spin_lock_init(&tl->lock);
+
+ return tl;
+}
+
+static void goldfish_sync_timeline_free(struct kref *kref)
+{
+ struct goldfish_sync_timeline *tl =
+ container_of(kref, struct goldfish_sync_timeline, kref);
+
+ kfree(tl);
+}
+
+static void goldfish_sync_timeline_get(struct goldfish_sync_timeline *tl)
+{
+ kref_get(&tl->kref);
+}
+
+void goldfish_sync_timeline_put(struct goldfish_sync_timeline *tl)
+{
+ kref_put(&tl->kref, goldfish_sync_timeline_free);
+}
+
+void goldfish_sync_timeline_signal(struct goldfish_sync_timeline *tl,
+ unsigned int inc)
+{
+ unsigned long flags;
+ struct sync_pt *pt, *next;
+
+ spin_lock_irqsave(&tl->lock, flags);
+ tl->seqno += inc;
+
+ list_for_each_entry_safe(pt, next, &tl->active_list_head, active_list) {
+ /* dma_fence_is_signaled_locked has side effects */
+ if (dma_fence_is_signaled_locked(&pt->base))
+ list_del_init(&pt->active_list);
+ }
+ spin_unlock_irqrestore(&tl->lock, flags);
+}
+
+static const struct dma_fence_ops goldfish_sync_timeline_fence_ops;
+
+static struct sync_pt __must_check
+*goldfish_sync_pt_create(struct goldfish_sync_timeline *tl,
+ unsigned int value)
+{
+ struct sync_pt *pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+
+ if (!pt)
+ return NULL;
+
+ dma_fence_init(&pt->base,
+ &goldfish_sync_timeline_fence_ops,
+ &tl->lock,
+ tl->context,
+ value);
+ INIT_LIST_HEAD(&pt->active_list);
+ goldfish_sync_timeline_get(tl); /* pt refers to tl */
+
+ return pt;
+}
+
+static void goldfish_sync_pt_destroy(struct sync_pt *pt)
+{
+ struct goldfish_sync_timeline *tl =
+ goldfish_dma_fence_parent(&pt->base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tl->lock, flags);
+ if (!list_empty(&pt->active_list))
+ list_del(&pt->active_list);
+ spin_unlock_irqrestore(&tl->lock, flags);
+
+ goldfish_sync_timeline_put(tl); /* unref pt from tl */
+ dma_fence_free(&pt->base);
+}
+
+static const char
+*goldfish_sync_timeline_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "sw_sync";
+}
+
+static const char
+*goldfish_sync_timeline_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);
+
+ return tl->name;
+}
+
+static void goldfish_sync_timeline_fence_release(struct dma_fence *fence)
+{
+ goldfish_sync_pt_destroy(goldfish_sync_fence_to_sync_pt(fence));
+}
+
+static bool goldfish_sync_timeline_fence_signaled(struct dma_fence *fence)
+{
+ struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);
+
+ return tl->seqno >= fence->seqno;
+}
+
+static bool
+goldfish_sync_timeline_fence_enable_signaling(struct dma_fence *fence)
+{
+ struct sync_pt *pt;
+ struct goldfish_sync_timeline *tl;
+
+ if (goldfish_sync_timeline_fence_signaled(fence))
+ return false;
+
+ pt = goldfish_sync_fence_to_sync_pt(fence);
+ tl = goldfish_dma_fence_parent(fence);
+ list_add_tail(&pt->active_list, &tl->active_list_head);
+ return true;
+}
+
+static void goldfish_sync_timeline_fence_value_str(struct dma_fence *fence,
+ char *str, int size)
+{
+	snprintf(str, size, "%llu", fence->seqno);
+}
+
+static void goldfish_sync_timeline_fence_timeline_value_str(
+ struct dma_fence *fence,
+ char *str, int size)
+{
+ struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);
+
+ snprintf(str, size, "%d", tl->seqno);
+}
+
+static const struct dma_fence_ops goldfish_sync_timeline_fence_ops = {
+ .get_driver_name = goldfish_sync_timeline_fence_get_driver_name,
+ .get_timeline_name = goldfish_sync_timeline_fence_get_timeline_name,
+ .enable_signaling = goldfish_sync_timeline_fence_enable_signaling,
+ .signaled = goldfish_sync_timeline_fence_signaled,
+ .wait = dma_fence_default_wait,
+ .release = goldfish_sync_timeline_fence_release,
+ .fence_value_str = goldfish_sync_timeline_fence_value_str,
+ .timeline_value_str = goldfish_sync_timeline_fence_timeline_value_str,
+};
+
+static int __must_check
+goldfish_sync_fence_create(struct goldfish_sync_timeline *tl, u32 val)
+{
+ struct sync_pt *pt;
+ struct sync_file *sync_file_obj = NULL;
+ int fd;
+
+ pt = goldfish_sync_pt_create(tl, val);
+ if (!pt)
+ return -1;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ goto err_cleanup_pt;
+
+ sync_file_obj = sync_file_create(&pt->base);
+ if (!sync_file_obj)
+ goto err_cleanup_fd_pt;
+
+ fd_install(fd, sync_file_obj->file);
+
+ dma_fence_put(&pt->base); /* sync_file_obj now owns the fence */
+ return fd;
+
+err_cleanup_fd_pt:
+ put_unused_fd(fd);
+err_cleanup_pt:
+ goldfish_sync_pt_destroy(pt);
+
+ return -1;
+}
+
+static inline void
+goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
+ u32 cmd,
+ u64 handle,
+ u32 time_arg,
+ u64 hostcmd_handle)
+{
+ struct goldfish_sync_hostcmd *to_add;
+
+ WARN_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS);
+
+ to_add = &sync_state->to_do[sync_state->to_do_end];
+
+ to_add->cmd = cmd;
+ to_add->handle = handle;
+ to_add->time_arg = time_arg;
+ to_add->hostcmd_handle = hostcmd_handle;
+
+ ++sync_state->to_do_end;
+}
+
+static inline void
+goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
+ u32 cmd,
+ u64 handle,
+ u32 time_arg,
+ u64 hostcmd_handle)
+{
+ unsigned long irq_flags;
+ struct goldfish_sync_hostcmd *batch_hostcmd =
+ &sync_state->batch_hostcmd;
+
+ spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);
+
+ batch_hostcmd->cmd = cmd;
+ batch_hostcmd->handle = handle;
+ batch_hostcmd->time_arg = time_arg;
+ batch_hostcmd->hostcmd_handle = hostcmd_handle;
+ writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
+
+ spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
+}
+
+static inline void
+goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
+ u32 cmd,
+ u64 glsync_handle,
+ u64 thread_handle,
+ u64 timeline_handle)
+{
+ unsigned long irq_flags;
+ struct goldfish_sync_guestcmd *batch_guestcmd =
+ &sync_state->batch_guestcmd;
+
+ spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);
+
+ batch_guestcmd->host_command = cmd;
+ batch_guestcmd->glsync_handle = glsync_handle;
+ batch_guestcmd->thread_handle = thread_handle;
+ batch_guestcmd->guest_timeline_handle = timeline_handle;
+ writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);
+
+ spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
+}
+
+/* |goldfish_sync_interrupt| handles IRQs raised by the virtual device.
+ * In the context of OpenGL, this interrupt fires whenever we need
+ * to signal a fence fd in the guest, with the command
+ * |CMD_SYNC_TIMELINE_INC|.
+ * However, because this function runs in interrupt context,
+ * the actual work of signaling must be done outside of it.
+ * The shared work queue is used for this purpose: once all pending
+ * commands have been drained by the interrupt handler, we call
+ * |schedule_work|, which later runs the queued sync commands in
+ * |goldfish_sync_work_item_fn|.
+ */
+static irqreturn_t
+goldfish_sync_interrupt_impl(struct goldfish_sync_state *sync_state)
+{
+ struct goldfish_sync_hostcmd *batch_hostcmd =
+ &sync_state->batch_hostcmd;
+
+ spin_lock(&sync_state->to_do_lock);
+ for (;;) {
+ u32 nextcmd;
+ u32 command_r;
+ u64 handle_rw;
+ u32 time_r;
+ u64 hostcmd_handle_rw;
+
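+ /* Reading SYNC_REG_BATCH_COMMAND asks the device to fill in the
+ * shared |batch_hostcmd| buffer (registered at probe time) with
+ * the next pending host command, or 0 if there is none.
+ */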
+ readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
+ nextcmd = batch_hostcmd->cmd;
+
+ if (nextcmd == 0)
+ break;
+
+ command_r = nextcmd;
+ handle_rw = batch_hostcmd->handle;
+ time_r = batch_hostcmd->time_arg;
+ hostcmd_handle_rw = batch_hostcmd->hostcmd_handle;
+
+ goldfish_sync_cmd_queue(sync_state,
+ command_r,
+ handle_rw,
+ time_r,
+ hostcmd_handle_rw);
+ }
+ spin_unlock(&sync_state->to_do_lock);
+
+ schedule_work(&sync_state->work_item);
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations goldfish_sync_fops;
+
+static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
+{
+ struct goldfish_sync_state *sync_state = dev_id;
+
+ return (sync_state->miscdev.fops == &goldfish_sync_fops) ?
+ goldfish_sync_interrupt_impl(sync_state) : IRQ_NONE;
+}
+
+/* We expect that commands will come in at a slow enough rate
+ * so that incoming items will not be more than
+ * GOLDFISH_SYNC_MAX_CMDS.
+ *
+ * This is because the way the sync device is used,
+ * it's only for managing buffer data transfers per frame,
+ * with a sequential dependency between putting things in
+ * to_do and taking them out. Once a set of commands is
+ * queued up in to_do, the user of the device waits for
+ * them to be processed before queuing additional commands,
+ * which limits the rate at which commands come in
+ * to the rate at which we take them out here.
+ *
+ * We also don't expect more than GOLDFISH_SYNC_MAX_CMDS commands
+ * to be issued at once; each command corresponds to a buffer
+ * that needs swapping for a particular display or buffer queue,
+ * and we don't expect enough displays or buffer queues to be
+ * in operation at once to overrun GOLDFISH_SYNC_MAX_CMDS.
+ */
+static u32 __must_check
+goldfish_sync_grab_commands(struct goldfish_sync_state *sync_state,
+ struct goldfish_sync_hostcmd *dst)
+{
+ u32 to_do_end;
+ u32 i;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);
+
+ to_do_end = sync_state->to_do_end;
+ for (i = 0; i < to_do_end; i++)
+ dst[i] = sync_state->to_do[i];
+ sync_state->to_do_end = 0;
+
+ spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
+
+ return to_do_end;
+}
+
+static void goldfish_sync_run_hostcmd(struct goldfish_sync_state *sync_state,
+ struct goldfish_sync_hostcmd *todo)
+{
+ struct goldfish_sync_timeline *tl =
+ (struct goldfish_sync_timeline *)(uintptr_t)todo->handle;
+ int sync_fence_fd;
+
+ switch (todo->cmd) {
+ case CMD_SYNC_READY:
+ break;
+
+ case CMD_CREATE_SYNC_TIMELINE:
+ tl = goldfish_sync_timeline_create(sync_state);
+ WARN_ON(!tl);
+ goldfish_sync_hostcmd_reply(sync_state,
+ CMD_CREATE_SYNC_TIMELINE,
+ (uintptr_t)tl,
+ 0,
+ todo->hostcmd_handle);
+ break;
+
+ case CMD_CREATE_SYNC_FENCE:
+ WARN_ON(!tl);
+ sync_fence_fd = goldfish_sync_fence_create(tl, todo->time_arg);
+ goldfish_sync_hostcmd_reply(sync_state,
+ CMD_CREATE_SYNC_FENCE,
+ sync_fence_fd,
+ 0,
+ todo->hostcmd_handle);
+ break;
+
+ case CMD_SYNC_TIMELINE_INC:
+ WARN_ON(!tl);
+ goldfish_sync_timeline_signal(tl, todo->time_arg);
+ break;
+
+ case CMD_DESTROY_SYNC_TIMELINE:
+ WARN_ON(!tl);
+ goldfish_sync_timeline_put(tl);
+ break;
+ }
+}
+
+/* |goldfish_sync_work_item_fn| does the actual work of servicing
+ * host->guest sync commands. It is scheduled from the IRQ handler
+ * whenever the goldfish sync device raises an interrupt. Once it
+ * starts running, it grabs the commands queued up so far (there may
+ * be multiple, because our IRQ is level triggered rather than edge
+ * triggered) and runs them one after the other.
+ */
+static void goldfish_sync_work_item_fn(struct work_struct *input)
+{
+ struct goldfish_sync_state *sync_state =
+ container_of(input, struct goldfish_sync_state, work_item);
+
+ struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS];
+ u32 to_do_end;
+ u32 i;
+
+ mutex_lock(&sync_state->mutex_lock);
+
+ to_do_end = goldfish_sync_grab_commands(sync_state, to_run);
+
+ for (i = 0; i < to_do_end; i++)
+ goldfish_sync_run_hostcmd(sync_state, &to_run[i]);
+
+ mutex_unlock(&sync_state->mutex_lock);
+}
+
+static int goldfish_sync_open(struct inode *inode, struct file *filp)
+{
+ struct goldfish_sync_state *sync_state =
+ container_of(filp->private_data,
+ struct goldfish_sync_state,
+ miscdev);
+
+ if (mutex_lock_interruptible(&sync_state->mutex_lock))
+ return -ERESTARTSYS;
+
+ filp->private_data = goldfish_sync_timeline_create(sync_state);
+ mutex_unlock(&sync_state->mutex_lock);
+
+ return filp->private_data ? 0 : -ENOMEM;
+}
+
+static int goldfish_sync_release(struct inode *inode, struct file *filp)
+{
+ struct goldfish_sync_timeline *tl = filp->private_data;
+
+ goldfish_sync_timeline_put(tl);
+ return 0;
+}
+
+/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync.
+ * It is used in conjunction with eglCreateSyncKHR to queue up the
+ * host-side work of waiting for the EGL sync command to complete,
+ * and returns a fence fd to the guest that is signaled once that
+ * work is done.
+ */
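+
+/* A minimal guest userspace sketch of this flow (illustrative only; the
+ * device node path, the host handle values, and the error handling are
+ * assumptions, not something this driver guarantees):
+ *
+ *     #include <fcntl.h>
+ *     #include <poll.h>
+ *     #include <sys/ioctl.h>
+ *     #include "goldfish_sync.h"   // the uapi header for this driver
+ *
+ *     int dev = open("/dev/goldfish_sync", O_RDWR);
+ *     struct goldfish_sync_ioctl_info info = {
+ *         .host_glsync_handle_in = host_glsync_handle,
+ *         .host_syncthread_handle_in = host_syncthread_handle,
+ *     };
+ *
+ *     if (ioctl(dev, GOLDFISH_SYNC_IOC_QUEUE_WORK, &info) == 0) {
+ *         // The returned fence fd signals (POLLIN) once the host
+ *         // finishes the queued wait and increments the timeline.
+ *         struct pollfd pfd = { .fd = info.fence_fd_out, .events = POLLIN };
+ *         poll(&pfd, 1, -1);
+ *         close(info.fence_fd_out);
+ *     }
+ *     close(dev);
+ */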
+static long
+goldfish_sync_ioctl_locked(struct goldfish_sync_timeline *tl,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct goldfish_sync_ioctl_info ioctl_data;
+ int fd_out = -1;
+
+ switch (cmd) {
+ case GOLDFISH_SYNC_IOC_QUEUE_WORK:
+ if (copy_from_user(&ioctl_data,
+ (void __user *)arg,
+ sizeof(ioctl_data)))
+ return -EFAULT;
+
+ if (!ioctl_data.host_syncthread_handle_in)
+ return -EFAULT;
+
+ fd_out = goldfish_sync_fence_create(tl, tl->seqno + 1);
+ ioctl_data.fence_fd_out = fd_out;
+
+ if (copy_to_user((void __user *)arg,
+ &ioctl_data,
+ sizeof(ioctl_data))) {
+ ksys_close(fd_out);
+ return -EFAULT;
+ }
+
+ /* We are now about to trigger a host-side wait;
+ * the host signals |tl| (and thus the fence fd
+ * returned above) once that wait completes.
+ */
+ goldfish_sync_send_guestcmd(tl->sync_state,
+ CMD_TRIGGER_HOST_WAIT,
+ ioctl_data.host_glsync_handle_in,
+ ioctl_data.host_syncthread_handle_in,
+ (u64)(uintptr_t)tl);
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long goldfish_sync_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct goldfish_sync_timeline *tl = filp->private_data;
+ struct goldfish_sync_state *x = tl->sync_state;
+ long res;
+
+ if (mutex_lock_interruptible(&x->mutex_lock))
+ return -ERESTARTSYS;
+
+ res = goldfish_sync_ioctl_locked(tl, cmd, arg);
+ mutex_unlock(&x->mutex_lock);
+
+ return res;
+}
+
+static bool setup_verify_batch_cmd_addr(char *reg_base,
+ void *batch_addr,
+ u32 addr_offset,
+ u32 addr_offset_high)
+{
+ u64 batch_addr_phys;
+ u64 batch_addr_phys_test_lo;
+ u64 batch_addr_phys_test_hi;
+
+ batch_addr_phys = virt_to_phys(batch_addr);
+ writel(lower_32_bits(batch_addr_phys), reg_base + addr_offset);
+ writel(upper_32_bits(batch_addr_phys), reg_base + addr_offset_high);
+
+ batch_addr_phys_test_lo = readl(reg_base + addr_offset);
+ batch_addr_phys_test_hi = readl(reg_base + addr_offset_high);
+
+ batch_addr_phys = batch_addr_phys_test_lo |
+ (batch_addr_phys_test_hi << 32);
+
+ return virt_to_phys(batch_addr) == batch_addr_phys;
+}
+
+static const struct file_operations goldfish_sync_fops = {
+ .owner = THIS_MODULE,
+ .open = goldfish_sync_open,
+ .release = goldfish_sync_release,
+ .unlocked_ioctl = goldfish_sync_ioctl,
+ .compat_ioctl = goldfish_sync_ioctl,
+};
+
+static void fill_miscdevice(struct miscdevice *misc)
+{
+ misc->name = GOLDFISH_SYNC_DEVICE_NAME;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->fops = &goldfish_sync_fops;
+}
+
+static int goldfish_sync_probe(struct platform_device *pdev)
+{
+ struct goldfish_sync_state *sync_state;
+ struct resource *ioresource;
+ int result;
+
+ sync_state = devm_kzalloc(&pdev->dev, sizeof(*sync_state), GFP_KERNEL);
+ if (!sync_state)
+ return -ENOMEM;
+
+ spin_lock_init(&sync_state->to_do_lock);
+ mutex_init(&sync_state->mutex_lock);
+ INIT_WORK(&sync_state->work_item, goldfish_sync_work_item_fn);
+
+ ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ioresource)
+ return -ENODEV;
+
+ sync_state->reg_base =
+ devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
+ if (!sync_state->reg_base)
+ return -ENOMEM;
+
+ result = platform_get_irq(pdev, 0);
+ if (result < 0)
+ return -ENODEV;
+
+ sync_state->irq = result;
+
+ result = devm_request_irq(&pdev->dev,
+ sync_state->irq,
+ goldfish_sync_interrupt,
+ IRQF_SHARED,
+ pdev->name,
+ sync_state);
+ if (result)
+ return -ENODEV;
+
+ if (!setup_verify_batch_cmd_addr(sync_state->reg_base,
+ &sync_state->batch_hostcmd,
+ SYNC_REG_BATCH_COMMAND_ADDR,
+ SYNC_REG_BATCH_COMMAND_ADDR_HIGH))
+ return -ENODEV;
+
+ if (!setup_verify_batch_cmd_addr(sync_state->reg_base,
+ &sync_state->batch_guestcmd,
+ SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
+ SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH))
+ return -ENODEV;
+
+ fill_miscdevice(&sync_state->miscdev);
+ result = misc_register(&sync_state->miscdev);
+ if (result)
+ return -ENODEV;
+
+ writel(0, sync_state->reg_base + SYNC_REG_INIT);
+
+ platform_set_drvdata(pdev, sync_state);
+
+ return 0;
+}
+
+static int goldfish_sync_remove(struct platform_device *pdev)
+{
+ struct goldfish_sync_state *sync_state = platform_get_drvdata(pdev);
+
+ misc_deregister(&sync_state->miscdev);
+ return 0;
+}
+
+static const struct of_device_id goldfish_sync_of_match[] = {
+ { .compatible = "google,goldfish-sync", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);
+
+static const struct acpi_device_id goldfish_sync_acpi_match[] = {
+ { "GFSH0006", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);
+
+static struct platform_driver goldfish_sync = {
+ .probe = goldfish_sync_probe,
+ .remove = goldfish_sync_remove,
+ .driver = {
+ .name = GOLDFISH_SYNC_DEVICE_NAME,
+ .of_match_table = goldfish_sync_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
+ }
+};
+module_platform_driver(goldfish_sync);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Android QEMU Sync Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0");
diff --git a/uapi/goldfish_address_space.h b/uapi/goldfish_address_space.h
new file mode 100644
index 0000000..f18304f
--- /dev/null
+++ b/uapi/goldfish_address_space.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef UAPI_GOLDFISH_ADDRESS_SPACE_H
+#define UAPI_GOLDFISH_ADDRESS_SPACE_H
+
+#include <linux/types.h>
+
+#define GOLDFISH_ADDRESS_SPACE_DEVICE_NAME "goldfish_address_space"
+
+struct goldfish_address_space_allocate_block {
+ __u64 size;
+ __u64 offset;
+ __u64 phys_addr;
+};
+
+struct goldfish_address_space_ping {
+ __u64 offset;
+ __u64 size;
+ __u64 metadata;
+ __u32 version;
+ __u32 wait_fd;
+ __u32 wait_flags;
+ __u32 direction;
+};
+
+struct goldfish_address_space_claim_shared {
+ __u64 offset;
+ __u64 size;
+};
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC 'G'
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_OP(OP, T) \
+ _IOWR(GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC, OP, T)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK \
+ GOLDFISH_ADDRESS_SPACE_IOCTL_OP(10, \
+ struct goldfish_address_space_allocate_block)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK \
+ GOLDFISH_ADDRESS_SPACE_IOCTL_OP(11, __u64)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_PING \
+ GOLDFISH_ADDRESS_SPACE_IOCTL_OP(12, \
+ struct goldfish_address_space_ping)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED \
+ GOLDFISH_ADDRESS_SPACE_IOCTL_OP(13, \
+ struct goldfish_address_space_claim_shared)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED \
+ GOLDFISH_ADDRESS_SPACE_IOCTL_OP(14, __u64)
+
+#endif /* UAPI_GOLDFISH_ADDRESS_SPACE_H */
diff --git a/uapi/goldfish_dma.h b/uapi/goldfish_dma.h
new file mode 100644
index 0000000..a901902
--- /dev/null
+++ b/uapi/goldfish_dma.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UAPI_GOLDFISH_DMA_H
+#define UAPI_GOLDFISH_DMA_H
+
+#include <linux/types.h>
+
+/* GOLDFISH DMA
+ *
+ * Goldfish DMA is an extension to the pipe device
+ * and is designed to facilitate high-speed RAM->RAM
+ * transfers from guest to host.
+ *
+ * Interface (guest side):
+ *
+ * The guest user issues the goldfish DMA ioctls and then
+ * calls mmap() on a goldfish pipe fd, indicating that it
+ * wants high-speed access to host-visible memory.
+ *
+ * The guest can then write through the pointer
+ * returned by mmap(), and these writes
+ * become immediately visible on the host without taking
+ * the BQL or otherwise context switching.
+ *
+ * dma_alloc_coherent() is used to obtain contiguous
+ * physical memory regions, and we allocate and interact
+ * with this region on both guest and host through
+ * the following ioctls:
+ *
+ * - LOCK: lock the region for data access.
+ * - UNLOCK: unlock the region. This may also be done from the host
+ * through the WAKE_ON_UNLOCK_DMA procedure.
+ * - CREATE_REGION: initialize size info for a dma region.
+ * - GETOFF: send physical address to guest drivers.
+ * - (UN)MAPHOST: uses goldfish_pipe_cmd to tell the host to
+ * (un)map to the guest physical address associated
+ * with the current dma context. This makes the physically
+ * contiguous memory (in)visible to the host.
+ *
+ * Guest userspace obtains a pointer to the DMA memory
+ * through mmap(), which also lazily allocates the memory
+ * with dma_alloc_coherent(). (On the last pipe close(), the region is freed.)
+ * The mmap()ed region can handle very high bandwidth
+ * transfers, and pipe operations can be used at the same
+ * time to handle synchronization and command communication.
+ */
+
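+/* A minimal guest userspace sketch of this flow, using the ioctls defined
+ * below (illustrative only; the device node path, the region size, the
+ * exact call ordering, and the error handling are assumptions, not part
+ * of this interface):
+ *
+ *     #include <fcntl.h>
+ *     #include <sys/ioctl.h>
+ *     #include <sys/mman.h>
+ *     #include "goldfish_dma.h"
+ *
+ *     int fd = open("/dev/goldfish_pipe", O_RDWR);
+ *     struct goldfish_dma_ioctl_info info = { .size = 4096 };
+ *
+ *     ioctl(fd, GOLDFISH_DMA_IOC_CREATE_REGION, &info);
+ *     void *ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
+ *                      MAP_SHARED, fd, 0);
+ *     ioctl(fd, GOLDFISH_DMA_IOC_GETOFF, &info);  // info.phys_begin now set
+ *
+ *     ioctl(fd, GOLDFISH_DMA_IOC_LOCK, &info);
+ *     // ... write data for the host into ptr[0 .. info.size) ...
+ *     ioctl(fd, GOLDFISH_DMA_IOC_UNLOCK, &info);
+ *
+ *     munmap(ptr, info.size);
+ *     close(fd);  // the region is freed on the last close()
+ */
+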
+#define GOLDFISH_DMA_BUFFER_SIZE (32 * 1024 * 1024)
+
+struct goldfish_dma_ioctl_info {
+ __u64 phys_begin;
+ __u64 size;
+};
+
+/* The goldfish DMA driver defines its own ioctls. The magic number is
+ * chosen so that it only conflicts with ioctls that are unlikely to be
+ * used in the emulator:
+ * 'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
+ * 'G' 00-0F linux/gigaset_dev.h conflict!
+ */
+#define GOLDFISH_DMA_IOC_MAGIC 'G'
+#define GOLDFISH_DMA_IOC_OP(OP) _IOWR(GOLDFISH_DMA_IOC_MAGIC, OP, \
+ struct goldfish_dma_ioctl_info)
+
+#define GOLDFISH_DMA_IOC_LOCK GOLDFISH_DMA_IOC_OP(0)
+#define GOLDFISH_DMA_IOC_UNLOCK GOLDFISH_DMA_IOC_OP(1)
+#define GOLDFISH_DMA_IOC_GETOFF GOLDFISH_DMA_IOC_OP(2)
+#define GOLDFISH_DMA_IOC_CREATE_REGION GOLDFISH_DMA_IOC_OP(3)
+
+#endif /* UAPI_GOLDFISH_DMA_H */
diff --git a/uapi/goldfish_sync.h b/uapi/goldfish_sync.h
new file mode 100644
index 0000000..01d762f
--- /dev/null
+++ b/uapi/goldfish_sync.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef UAPI_GOLDFISH_SYNC_H
+#define UAPI_GOLDFISH_SYNC_H
+
+#include <linux/types.h>
+
+#define GOLDFISH_SYNC_DEVICE_NAME "goldfish_sync"
+
+struct goldfish_sync_ioctl_info {
+ __u64 host_glsync_handle_in;
+ __u64 host_syncthread_handle_in;
+ __s32 fence_fd_out;
+};
+
+/* The goldfish sync driver defines its own ioctl. The magic number is
+ * chosen so that it only conflicts with ioctls that are unlikely to be
+ * used in the emulator:
+ *
+ * '@' 00-0F linux/radeonfb.h conflict!
+ * '@' 00-0F drivers/video/aty/aty128fb.c conflict!
+ */
+#define GOLDFISH_SYNC_IOC_MAGIC '@'
+
+#define GOLDFISH_SYNC_IOC_QUEUE_WORK \
+ _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)
+
+#endif /* UAPI_GOLDFISH_SYNC_H */