author     Colin Downs-Razouk <colindr@google.com>  2024-03-04 21:05:32 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2024-03-04 21:05:32 +0000
commit     9b39747550f44621e28d766f566ca96855e00865 (patch)
tree       81d397e379a22e9deddb5ebace603728687c81bf
parent     f4885d4b5a302d30d7f40c6eb2872fcdc3c9c6f3 (diff)
parent     4ed81e36b2160e5712805226ee39edae56505668 (diff)
download   virtual-device-android-gs-bluejay-5.10-android15-beta.tar.gz
* changes:
  ANDROID: virtio-dxgkrnl: add seqno debug logs
  ANDROID: virtio-dxgkrnl: ESM flag fix
  ANDROID: virtio-dxgkrnl: disable device state caching
  ANDROID: virtio-dxgkrnl: save output privateData
  ANDROID: virtio-dxgkrnl: present virtual
  ANDROID: virtio-dxgkrnl: fix red herring errors
  ANDROID: virtio-dxgkrnl: return result from all sync commands
  ANDROID: virtio-dxgkrnl: return ntstatus from createAllocation
  ANDROID: virtio-dxgkrnl: return ntstatus from reserve gpu va
  ANDROID: virtio-dxgkrnl: remove SharedHandle expectation
  ANDROID: virtio-dxgkrnl: use d3dddi_allocationinfo
  ANDROID: virtio-dxgkrnl: fix sync object cpu va map
  ANDROID: virtio-dxgkrnl: call open adapter
  ANDROID: virtio-dxgkrnl: events and command completions
  ANDROID: virtio-dxgkrnl: initial implementation
  ANDROID: virtio-dxgkrnl: convert to virtio
  ANDROID: virtio_dxgkrnl: fix formatting issues
  ANDROID: Allow killing a process waiting for a VM bus message reply
  ANDROID: drivers: hv: dxgkrnl: Implement support for mapping guest pages on the host.
  ANDROID: drivers: hv: dxgkrnl: Implement DXGSYNCFILE
  ANDROID: drivers: hv: dxgkrnl: Implementation of submit command, paging and hardware queue.
  ANDROID: drivers: hv: dxgkrnl: Seal the shared resource object when dxgk_share_objects is called.
  ANDROID: drivers: hv: dxgkrnl: Implement sharing resources and sync objects
  ANDROID: drivers: hv: dxgkrnl: Implement operations with GPU sync objects
  ANDROID: drivers: hv: dxgkrnl: Implement creation/destruction of GPU allocations/resources
  ANDROID: drivers: hv: dxgkrnl: Driver initialization and creation of dxgadapter
-rw-r--r--  Kbuild                                       1
-rw-r--r--  virtio_dxgkrnl/Kbuild                        7
-rw-r--r--  virtio_dxgkrnl/dxgadapter.c               1381
-rw-r--r--  virtio_dxgkrnl/dxgglobal.c                 408
-rw-r--r--  virtio_dxgkrnl/dxgglobal.h                  25
-rw-r--r--  virtio_dxgkrnl/dxgkrnl.h                   972
-rw-r--r--  virtio_dxgkrnl/dxgprocess.c                345
-rw-r--r--  virtio_dxgkrnl/dxgsyncfile.c               210
-rw-r--r--  virtio_dxgkrnl/dxgsyncfile.h                30
-rw-r--r--  virtio_dxgkrnl/dxgvmbus.c                 3502
-rw-r--r--  virtio_dxgkrnl/dxgvmbus.h                  953
-rw-r--r--  virtio_dxgkrnl/hmgr.c                      601
-rw-r--r--  virtio_dxgkrnl/hmgr.h                      112
-rw-r--r--  virtio_dxgkrnl/include/uapi/d3dkmthk.h    1972
-rw-r--r--  virtio_dxgkrnl/ioctl.c                    5619
-rw-r--r--  virtio_dxgkrnl/misc.c                       43
-rw-r--r--  virtio_dxgkrnl/misc.h                       97
-rw-r--r--  virtio_dxgkrnl/virtio_dxgkrnl.c            826
-rw-r--r--  virtio_dxgkrnl/virtio_dxgkrnl.h             71
19 files changed, 17175 insertions, 0 deletions
diff --git a/Kbuild b/Kbuild
index d0aa14f..1733c19 100644
--- a/Kbuild
+++ b/Kbuild
@@ -6,3 +6,4 @@ obj-$(CONFIG_VIRT_WIFI) += wlan_simulation/
obj-${BUILD_GOLDFISH_DRIVERS} += goldfish_drivers/
obj-${BUILD_RTL8821CU} += rtl8821cu/
+obj-${BUILD_VIRTIO_DXGKRNL} += virtio_dxgkrnl/
diff --git a/virtio_dxgkrnl/Kbuild b/virtio_dxgkrnl/Kbuild
new file mode 100644
index 0000000..c8b1d5c
--- /dev/null
+++ b/virtio_dxgkrnl/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+virtio-dxgkrnl-objs := virtio_dxgkrnl.o dxgglobal.o hmgr.o misc.o \
+ dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o dxgsyncfile.o
+
+obj-m += virtio-dxgkrnl.o
+LINUXINCLUDE := -I$(srctree)/../common-modules/virtual-device/uapi ${LINUXINCLUDE}
diff --git a/virtio_dxgkrnl/dxgadapter.c b/virtio_dxgkrnl/dxgadapter.c
new file mode 100644
index 0000000..10f1251
--- /dev/null
+++ b/virtio_dxgkrnl/dxgadapter.c
@@ -0,0 +1,1381 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Implementation of dxgadapter and its objects
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/hyperv.h>
+#include <linux/pagemap.h>
+#include <linux/eventfd.h>
+
+#include "dxgkrnl.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+#undef dev_fmt
+#define dev_fmt(fmt) "dxgk: " fmt
+
+int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev)
+{
+ int ret;
+
+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance,
+ &adapter->luid);
+ dev_dbg(dxgglobaldev, "%s: %x:%x %p %pUb\n",
+ __func__, adapter->luid.b, adapter->luid.a, hdev->channel,
+ &hdev->channel->offermsg.offer.if_instance);
+
+ ret = dxgvmbuschannel_init(&adapter->channel, hdev);
+ if (ret)
+ goto cleanup;
+
+ adapter->channel.adapter = adapter;
+ adapter->hv_dev = hdev;
+
+ ret = dxgvmb_send_open_adapter(adapter);
+ if (ret < 0) {
+ pr_err("dxgvmb_send_open_adapter failed: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_get_internal_adapter_info(adapter);
+ if (ret < 0)
+ pr_err("get_internal_adapter_info failed: %d", ret);
+
+cleanup:
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+void dxgadapter_start(struct dxgadapter *adapter)
+{
+ struct dxgvgpuchannel *ch = NULL;
+ struct dxgvgpuchannel *entry;
+ int ret;
+
+ dev_dbg(dxgglobaldev, "%s %x-%x",
+ __func__, adapter->luid.a, adapter->luid.b);
+
+ /* Find the corresponding vGPU vm bus channel */
+ list_for_each_entry(entry, &dxgglobal->vgpu_ch_list_head,
+ vgpu_ch_list_entry) {
+ if (memcmp(&adapter->luid,
+ &entry->adapter_luid,
+ sizeof(struct winluid)) == 0) {
+ ch = entry;
+ break;
+ }
+ }
+ if (ch == NULL) {
+		dev_dbg(dxgglobaldev, "%s vGPU channel is not ready", __func__);
+ return;
+ }
+
+ /* The global channel is initialized when the first adapter starts */
+ if (!dxgglobal->global_channel_initialized) {
+ ret = dxgglobal_init_global_channel();
+ if (ret) {
+ dxgglobal_destroy_global_channel();
+ return;
+ }
+ dxgglobal->global_channel_initialized = true;
+ }
+
+ /* Initialize vGPU vm bus channel */
+ if (ch->hdev != NULL) {
+ ret = dxgadapter_set_vmbus(adapter, ch->hdev);
+ if (ret) {
+ pr_err("Failed to start adapter %p", adapter);
+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED;
+ return;
+ }
+ } else {
+ ret = dxgvmb_send_open_adapter(adapter);
+ if (ret < 0) {
+ pr_err("dxgvmb_send_open_adapter failed: %d\n", ret);
+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED;
+ return;
+ }
+ }
+
+ adapter->adapter_state = DXGADAPTER_STATE_ACTIVE;
+ dev_dbg(dxgglobaldev, "%s Adapter started %p", __func__, adapter);
+}
+
+void dxgadapter_stop(struct dxgadapter *adapter)
+{
+ struct dxgprocess_adapter *entry;
+ bool adapter_stopped = false;
+
+ down_write(&adapter->core_lock);
+ if (!adapter->stopping_adapter)
+ adapter->stopping_adapter = true;
+ else
+ adapter_stopped = true;
+ up_write(&adapter->core_lock);
+
+ if (adapter_stopped)
+ return;
+
+ dxgglobal_acquire_process_adapter_lock();
+
+ list_for_each_entry(entry, &adapter->adapter_process_list_head,
+ adapter_process_list_entry) {
+ dxgprocess_adapter_stop(entry);
+ }
+
+ dxgglobal_release_process_adapter_lock();
+
+ if (dxgadapter_acquire_lock_exclusive(adapter) == 0) {
+ dxgvmb_send_close_adapter(adapter);
+ dxgadapter_release_lock_exclusive(adapter);
+ }
+ dxgvmbuschannel_destroy(&adapter->channel);
+
+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED;
+}
+
+void dxgadapter_release(struct kref *refcount)
+{
+ struct dxgadapter *adapter;
+
+ adapter = container_of(refcount, struct dxgadapter, adapter_kref);
+ dev_dbg(dxgglobaldev, "%s %p\n", __func__, adapter);
+ vfree(adapter);
+}
+
+bool dxgadapter_is_active(struct dxgadapter *adapter)
+{
+ return adapter->adapter_state == DXGADAPTER_STATE_ACTIVE;
+}
+
+/* Protected by dxgglobal_acquire_process_adapter_lock */
+void dxgadapter_add_process(struct dxgadapter *adapter,
+ struct dxgprocess_adapter *process_info)
+{
+ dev_dbg(dxgglobaldev, "%s %p %p", __func__, adapter, process_info);
+ list_add_tail(&process_info->adapter_process_list_entry,
+ &adapter->adapter_process_list_head);
+}
+
+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info)
+{
+ dev_dbg(dxgglobaldev, "%s %p %p", __func__,
+ process_info->adapter, process_info);
+ list_del(&process_info->adapter_process_list_entry);
+ process_info->adapter_process_list_entry.next = NULL;
+ process_info->adapter_process_list_entry.prev = NULL;
+}
+
+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter,
+ struct dxgsharedresource *object)
+{
+ down_write(&adapter->shared_resource_list_lock);
+ if (object->shared_resource_list_entry.next) {
+ list_del(&object->shared_resource_list_entry);
+ object->shared_resource_list_entry.next = NULL;
+ }
+ up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *object)
+{
+ down_write(&adapter->shared_resource_list_lock);
+ list_add_tail(&object->adapter_shared_syncobj_list_entry,
+ &adapter->adapter_shared_syncobj_list_head);
+ up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *object)
+{
+ down_write(&adapter->shared_resource_list_lock);
+ if (object->adapter_shared_syncobj_list_entry.next) {
+ list_del(&object->adapter_shared_syncobj_list_entry);
+ object->adapter_shared_syncobj_list_entry.next = NULL;
+ }
+ up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_add_syncobj(struct dxgadapter *adapter,
+ struct dxgsyncobject *object)
+{
+ down_write(&adapter->shared_resource_list_lock);
+ list_add_tail(&object->syncobj_list_entry, &adapter->syncobj_list_head);
+ up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_remove_syncobj(struct dxgsyncobject *object)
+{
+ down_write(&object->adapter->shared_resource_list_lock);
+ if (object->syncobj_list_entry.next) {
+ list_del(&object->syncobj_list_entry);
+ object->syncobj_list_entry.next = NULL;
+ }
+ up_write(&object->adapter->shared_resource_list_lock);
+}
+
+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter)
+{
+ down_write(&adapter->core_lock);
+ if (adapter->adapter_state != DXGADAPTER_STATE_ACTIVE) {
+ dxgadapter_release_lock_exclusive(adapter);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter)
+{
+ down_write(&adapter->core_lock);
+}
+
+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter)
+{
+ up_write(&adapter->core_lock);
+}
+
+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter)
+{
+ down_read(&adapter->core_lock);
+ if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE)
+ return 0;
+ dxgadapter_release_lock_shared(adapter);
+ return -ENODEV;
+}
+
+void dxgadapter_release_lock_shared(struct dxgadapter *adapter)
+{
+ up_read(&adapter->core_lock);
+}
+
+struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter,
+ struct dxgprocess *process)
+{
+ struct dxgdevice *device = vzalloc(sizeof(struct dxgdevice));
+ int ret;
+
+ if (device) {
+ kref_init(&device->device_kref);
+ device->adapter = adapter;
+ device->process = process;
+ kref_get(&adapter->adapter_kref);
+ INIT_LIST_HEAD(&device->context_list_head);
+ INIT_LIST_HEAD(&device->alloc_list_head);
+ INIT_LIST_HEAD(&device->resource_list_head);
+ init_rwsem(&device->device_lock);
+ init_rwsem(&device->context_list_lock);
+ init_rwsem(&device->alloc_list_lock);
+ INIT_LIST_HEAD(&device->pqueue_list_head);
+ INIT_LIST_HEAD(&device->syncobj_list_head);
+ device->object_state = DXGOBJECTSTATE_CREATED;
+ device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE;
+
+ ret = dxgprocess_adapter_add_device(process, adapter, device);
+ if (ret < 0) {
+ kref_put(&device->device_kref, dxgdevice_release);
+ device = NULL;
+ }
+ }
+ return device;
+}
+
+void dxgdevice_stop(struct dxgdevice *device)
+{
+ struct dxgallocation *alloc;
+ struct dxgpagingqueue *pqueue;
+ struct dxgsyncobject *syncobj;
+
+ dev_dbg(dxgglobaldev, "%s: %p", __func__, device);
+ dxgdevice_acquire_alloc_list_lock(device);
+ list_for_each_entry(alloc, &device->alloc_list_head, alloc_list_entry) {
+ dxgallocation_stop(alloc);
+ }
+ dxgdevice_release_alloc_list_lock(device);
+
+ hmgrtable_lock(&device->process->handle_table, DXGLOCK_EXCL);
+ list_for_each_entry(pqueue, &device->pqueue_list_head,
+ pqueue_list_entry) {
+ dxgpagingqueue_stop(pqueue);
+ }
+ list_for_each_entry(syncobj, &device->syncobj_list_head,
+ syncobj_list_entry) {
+ dxgsyncobject_stop(syncobj);
+ }
+ hmgrtable_unlock(&device->process->handle_table, DXGLOCK_EXCL);
+ dev_dbg(dxgglobaldev, "%s: end %p\n", __func__, device);
+}
+
+void dxgdevice_mark_destroyed(struct dxgdevice *device)
+{
+ down_write(&device->device_lock);
+ device->object_state = DXGOBJECTSTATE_DESTROYED;
+ up_write(&device->device_lock);
+}
+
+void dxgdevice_destroy(struct dxgdevice *device)
+{
+ struct dxgprocess *process = device->process;
+ struct dxgadapter *adapter = device->adapter;
+ struct d3dkmthandle device_handle = {};
+
+ dev_dbg(dxgglobaldev, "%s: %p\n", __func__, device);
+
+ down_write(&device->device_lock);
+
+ if (device->object_state != DXGOBJECTSTATE_ACTIVE)
+ goto cleanup;
+
+ device->object_state = DXGOBJECTSTATE_DESTROYED;
+
+ dxgdevice_stop(device);
+
+ dxgdevice_acquire_alloc_list_lock(device);
+
+ while (!list_empty(&device->syncobj_list_head)) {
+ struct dxgsyncobject *syncobj =
+ list_first_entry(&device->syncobj_list_head,
+ struct dxgsyncobject,
+ syncobj_list_entry);
+ list_del(&syncobj->syncobj_list_entry);
+ syncobj->syncobj_list_entry.next = NULL;
+ dxgdevice_release_alloc_list_lock(device);
+
+ dxgsyncobject_destroy(process, syncobj);
+
+ dxgdevice_acquire_alloc_list_lock(device);
+ }
+
+ {
+ struct dxgallocation *alloc;
+ struct dxgallocation *tmp;
+
+ dev_dbg(dxgglobaldev, "destroying allocations\n");
+ list_for_each_entry_safe(alloc, tmp, &device->alloc_list_head,
+ alloc_list_entry) {
+ dxgallocation_destroy(alloc);
+ }
+ }
+
+ {
+ struct dxgresource *resource;
+ struct dxgresource *tmp;
+
+ dev_dbg(dxgglobaldev, "destroying resources\n");
+ list_for_each_entry_safe(resource, tmp,
+ &device->resource_list_head,
+ resource_list_entry) {
+ dxgresource_destroy(resource);
+ }
+ }
+
+ dxgdevice_release_alloc_list_lock(device);
+
+ {
+ struct dxgcontext *context;
+ struct dxgcontext *tmp;
+
+ dev_dbg(dxgglobaldev, "destroying contexts\n");
+ dxgdevice_acquire_context_list_lock(device);
+ list_for_each_entry_safe(context, tmp,
+ &device->context_list_head,
+ context_list_entry) {
+ dxgcontext_destroy(process, context);
+ }
+ dxgdevice_release_context_list_lock(device);
+ }
+
+ {
+ struct dxgpagingqueue *tmp;
+ struct dxgpagingqueue *pqueue;
+
+ dev_dbg(dxgglobaldev, "destroying paging queues\n");
+ list_for_each_entry_safe(pqueue, tmp, &device->pqueue_list_head,
+ pqueue_list_entry) {
+ dxgpagingqueue_destroy(pqueue);
+ }
+ }
+
+ /* Guest handles need to be released before the host handles */
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ if (device->handle_valid) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGDEVICE, device->handle);
+ device_handle = device->handle;
+ device->handle_valid = 0;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (device_handle.v) {
+ up_write(&device->device_lock);
+ if (dxgadapter_acquire_lock_shared(adapter) == 0) {
+ dxgvmb_send_destroy_device(adapter, process,
+ device_handle);
+ dxgadapter_release_lock_shared(adapter);
+ }
+ down_write(&device->device_lock);
+ }
+
+cleanup:
+
+ if (device->adapter) {
+ dxgprocess_adapter_remove_device(device);
+ kref_put(&device->adapter->adapter_kref, dxgadapter_release);
+ device->adapter = NULL;
+ }
+
+ up_write(&device->device_lock);
+
+ kref_put(&device->device_kref, dxgdevice_release);
+ dev_dbg(dxgglobaldev, "dxgdevice_destroy_end\n");
+}
+
+int dxgdevice_acquire_lock_shared(struct dxgdevice *device)
+{
+ down_read(&device->device_lock);
+ if (!dxgdevice_is_active(device)) {
+ up_read(&device->device_lock);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void dxgdevice_release_lock_shared(struct dxgdevice *device)
+{
+ up_read(&device->device_lock);
+}
+
+bool dxgdevice_is_active(struct dxgdevice *device)
+{
+ return device->object_state == DXGOBJECTSTATE_ACTIVE;
+}
+
+void dxgdevice_acquire_context_list_lock(struct dxgdevice *device)
+{
+ down_write(&device->context_list_lock);
+}
+
+void dxgdevice_release_context_list_lock(struct dxgdevice *device)
+{
+ up_write(&device->context_list_lock);
+}
+
+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *device)
+{
+ down_write(&device->alloc_list_lock);
+}
+
+void dxgdevice_release_alloc_list_lock(struct dxgdevice *device)
+{
+ up_write(&device->alloc_list_lock);
+}
+
+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *device)
+{
+ down_read(&device->alloc_list_lock);
+}
+
+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *device)
+{
+ up_read(&device->alloc_list_lock);
+}
+
+void dxgdevice_add_context(struct dxgdevice *device, struct dxgcontext *context)
+{
+ down_write(&device->context_list_lock);
+ list_add_tail(&context->context_list_entry, &device->context_list_head);
+ up_write(&device->context_list_lock);
+}
+
+void dxgdevice_remove_context(struct dxgdevice *device,
+ struct dxgcontext *context)
+{
+ if (context->context_list_entry.next) {
+ list_del(&context->context_list_entry);
+ context->context_list_entry.next = NULL;
+ }
+}
+
+void dxgdevice_add_alloc(struct dxgdevice *device, struct dxgallocation *alloc)
+{
+ dxgdevice_acquire_alloc_list_lock(device);
+ list_add_tail(&alloc->alloc_list_entry, &device->alloc_list_head);
+ kref_get(&device->device_kref);
+ alloc->owner.device = device;
+ dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_alloc(struct dxgdevice *device,
+ struct dxgallocation *alloc)
+{
+ if (alloc->alloc_list_entry.next) {
+ list_del(&alloc->alloc_list_entry);
+ alloc->alloc_list_entry.next = NULL;
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+}
+
+void dxgdevice_remove_alloc_safe(struct dxgdevice *device,
+ struct dxgallocation *alloc)
+{
+ dxgdevice_acquire_alloc_list_lock(device);
+ dxgdevice_remove_alloc(device, alloc);
+ dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_add_resource(struct dxgdevice *device, struct dxgresource *res)
+{
+ dxgdevice_acquire_alloc_list_lock(device);
+ list_add_tail(&res->resource_list_entry, &device->resource_list_head);
+ kref_get(&device->device_kref);
+ dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_resource(struct dxgdevice *device,
+ struct dxgresource *res)
+{
+ if (res->resource_list_entry.next) {
+ list_del(&res->resource_list_entry);
+ res->resource_list_entry.next = NULL;
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+}
+
+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter)
+{
+ struct dxgsharedresource *resource;
+
+ resource = vzalloc(sizeof(*resource));
+ if (resource) {
+ INIT_LIST_HEAD(&resource->resource_list_head);
+ kref_init(&resource->sresource_kref);
+ mutex_init(&resource->fd_mutex);
+ resource->adapter = adapter;
+ }
+ return resource;
+}
+
+void dxgsharedresource_destroy(struct kref *refcount)
+{
+ struct dxgsharedresource *resource;
+
+ resource = container_of(refcount, struct dxgsharedresource,
+ sresource_kref);
+ if (resource->runtime_private_data)
+ vfree(resource->runtime_private_data);
+ if (resource->resource_private_data)
+ vfree(resource->resource_private_data);
+ if (resource->alloc_private_data_sizes)
+ vfree(resource->alloc_private_data_sizes);
+ if (resource->alloc_private_data)
+ vfree(resource->alloc_private_data);
+ vfree(resource);
+}
+
+void dxgsharedresource_add_resource(struct dxgsharedresource *shared_resource,
+ struct dxgresource *resource)
+{
+ down_write(&shared_resource->adapter->shared_resource_list_lock);
+ dev_dbg(dxgglobaldev, "%s: %p %p", __func__, shared_resource, resource);
+ list_add_tail(&resource->shared_resource_list_entry,
+ &shared_resource->resource_list_head);
+ kref_get(&shared_resource->sresource_kref);
+ kref_get(&resource->resource_kref);
+ resource->shared_owner = shared_resource;
+ up_write(&shared_resource->adapter->shared_resource_list_lock);
+}
+
+void dxgsharedresource_remove_resource(struct dxgsharedresource
+ *shared_resource,
+ struct dxgresource *resource)
+{
+ struct dxgadapter *adapter = shared_resource->adapter;
+
+ down_write(&adapter->shared_resource_list_lock);
+ dev_dbg(dxgglobaldev, "%s: %p %p", __func__, shared_resource, resource);
+ if (resource->shared_resource_list_entry.next) {
+ list_del(&resource->shared_resource_list_entry);
+ resource->shared_resource_list_entry.next = NULL;
+ kref_put(&shared_resource->sresource_kref,
+ dxgsharedresource_destroy);
+ resource->shared_owner = NULL;
+ kref_put(&resource->resource_kref, dxgresource_release);
+ }
+ up_write(&adapter->shared_resource_list_lock);
+}
+
+struct dxgresource *dxgresource_create(struct dxgdevice *device)
+{
+ struct dxgresource *resource = vzalloc(sizeof(struct dxgresource));
+
+ if (resource) {
+ kref_init(&resource->resource_kref);
+ resource->device = device;
+ resource->process = device->process;
+ resource->object_state = DXGOBJECTSTATE_ACTIVE;
+ mutex_init(&resource->resource_mutex);
+ INIT_LIST_HEAD(&resource->alloc_list_head);
+ dxgdevice_add_resource(device, resource);
+ }
+ return resource;
+}
+
+void dxgresource_free_handle(struct dxgresource *resource)
+{
+ struct dxgallocation *alloc;
+ struct dxgprocess *process;
+
+ if (resource->handle_valid) {
+ process = resource->device->process;
+ hmgrtable_free_handle_safe(&process->handle_table,
+ HMGRENTRY_TYPE_DXGRESOURCE,
+ resource->handle);
+ resource->handle_valid = 0;
+ }
+ list_for_each_entry(alloc, &resource->alloc_list_head,
+ alloc_list_entry) {
+ dxgallocation_free_handle(alloc);
+ }
+}
+
+void dxgresource_destroy(struct dxgresource *resource)
+{
+ /* device->alloc_list_lock is held */
+ struct dxgallocation *alloc;
+ struct dxgallocation *tmp;
+ struct d3dkmt_destroyallocation2 args = { };
+ int destroyed = test_and_set_bit(0, &resource->flags);
+ struct dxgdevice *device = resource->device;
+ struct dxgsharedresource *shared_resource;
+
+ if (!destroyed) {
+ dxgresource_free_handle(resource);
+ if (resource->handle.v) {
+ args.device = device->handle;
+ args.resource = resource->handle;
+ dxgvmb_send_destroy_allocation(device->process,
+ device, &args, NULL);
+ resource->handle.v = 0;
+ }
+ list_for_each_entry_safe(alloc, tmp, &resource->alloc_list_head,
+ alloc_list_entry) {
+ dxgallocation_destroy(alloc);
+ }
+ dxgdevice_remove_resource(device, resource);
+ shared_resource = resource->shared_owner;
+ if (shared_resource) {
+ dxgsharedresource_remove_resource(shared_resource,
+ resource);
+ resource->shared_owner = NULL;
+ }
+ }
+ kref_put(&resource->resource_kref, dxgresource_release);
+}
+
+void dxgresource_release(struct kref *refcount)
+{
+ struct dxgresource *resource;
+
+ resource = container_of(refcount, struct dxgresource, resource_kref);
+ vfree(resource);
+}
+
+bool dxgresource_is_active(struct dxgresource *resource)
+{
+ return resource->object_state == DXGOBJECTSTATE_ACTIVE;
+}
+
+int dxgresource_add_alloc(struct dxgresource *resource,
+ struct dxgallocation *alloc)
+{
+ int ret = -ENODEV;
+ struct dxgdevice *device = resource->device;
+
+ dxgdevice_acquire_alloc_list_lock(device);
+ if (dxgresource_is_active(resource)) {
+ list_add_tail(&alloc->alloc_list_entry,
+ &resource->alloc_list_head);
+ alloc->owner.resource = resource;
+ ret = 0;
+ }
+ alloc->resource_owner = 1;
+ dxgdevice_release_alloc_list_lock(device);
+ return ret;
+}
+
+void dxgresource_remove_alloc(struct dxgresource *resource,
+ struct dxgallocation *alloc)
+{
+ if (alloc->alloc_list_entry.next) {
+ list_del(&alloc->alloc_list_entry);
+ alloc->alloc_list_entry.next = NULL;
+ }
+}
+
+void dxgresource_remove_alloc_safe(struct dxgresource *resource,
+ struct dxgallocation *alloc)
+{
+ dxgdevice_acquire_alloc_list_lock(resource->device);
+ dxgresource_remove_alloc(resource, alloc);
+ dxgdevice_release_alloc_list_lock(resource->device);
+}
+
+void dxgdevice_release(struct kref *refcount)
+{
+ struct dxgdevice *device;
+
+ device = container_of(refcount, struct dxgdevice, device_kref);
+ vfree(device);
+}
+
+void dxgdevice_add_paging_queue(struct dxgdevice *device,
+ struct dxgpagingqueue *entry)
+{
+ dxgdevice_acquire_alloc_list_lock(device);
+ list_add_tail(&entry->pqueue_list_entry, &device->pqueue_list_head);
+ dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue)
+{
+ struct dxgdevice *device = pqueue->device;
+
+ dxgdevice_acquire_alloc_list_lock(device);
+ if (pqueue->pqueue_list_entry.next) {
+ list_del(&pqueue->pqueue_list_entry);
+ pqueue->pqueue_list_entry.next = NULL;
+ }
+ dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_add_syncobj(struct dxgdevice *device,
+ struct dxgsyncobject *syncobj)
+{
+ dxgdevice_acquire_alloc_list_lock(device);
+ list_add_tail(&syncobj->syncobj_list_entry, &device->syncobj_list_head);
+ kref_get(&syncobj->syncobj_kref);
+ dxgdevice_release_alloc_list_lock(device);
+}
+
+void dxgdevice_remove_syncobj(struct dxgsyncobject *entry)
+{
+ struct dxgdevice *device = entry->device;
+
+ dxgdevice_acquire_alloc_list_lock(device);
+ if (entry->syncobj_list_entry.next) {
+ list_del(&entry->syncobj_list_entry);
+ entry->syncobj_list_entry.next = NULL;
+ kref_put(&entry->syncobj_kref, dxgsyncobject_release);
+ }
+ dxgdevice_release_alloc_list_lock(device);
+ kref_put(&device->device_kref, dxgdevice_release);
+ entry->device = NULL;
+}
+
+struct dxgcontext *dxgcontext_create(struct dxgdevice *device)
+{
+ struct dxgcontext *context = vzalloc(sizeof(struct dxgcontext));
+
+ if (context) {
+ kref_init(&context->context_kref);
+ context->device = device;
+ context->process = device->process;
+ context->device_handle = device->handle;
+ kref_get(&device->device_kref);
+ INIT_LIST_HEAD(&context->hwqueue_list_head);
+ init_rwsem(&context->hwqueue_list_lock);
+ dxgdevice_add_context(device, context);
+ context->object_state = DXGOBJECTSTATE_ACTIVE;
+ }
+ return context;
+}
+
+/*
+ * Called when the device context list lock is held
+ */
+void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context)
+{
+ struct dxghwqueue *hwqueue;
+ struct dxghwqueue *tmp;
+
+ dev_dbg(dxgglobaldev, "%s %p\n", __func__, context);
+ context->object_state = DXGOBJECTSTATE_DESTROYED;
+ if (context->device) {
+ if (context->handle.v) {
+ hmgrtable_free_handle_safe(&process->handle_table,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ context->handle);
+ }
+ dxgdevice_remove_context(context->device, context);
+ kref_put(&context->device->device_kref, dxgdevice_release);
+ }
+ list_for_each_entry_safe(hwqueue, tmp, &context->hwqueue_list_head,
+ hwqueue_list_entry) {
+ dxghwqueue_destroy(process, hwqueue);
+ }
+ kref_put(&context->context_kref, dxgcontext_release);
+}
+
+void dxgcontext_destroy_safe(struct dxgprocess *process,
+ struct dxgcontext *context)
+{
+ struct dxgdevice *device = context->device;
+
+ dxgdevice_acquire_context_list_lock(device);
+ dxgcontext_destroy(process, context);
+ dxgdevice_release_context_list_lock(device);
+}
+
+bool dxgcontext_is_active(struct dxgcontext *context)
+{
+ return context->object_state == DXGOBJECTSTATE_ACTIVE;
+}
+
+void dxgcontext_release(struct kref *refcount)
+{
+ struct dxgcontext *context;
+
+ context = container_of(refcount, struct dxgcontext, context_kref);
+ vfree(context);
+}
+
+int dxgcontext_add_hwqueue(struct dxgcontext *context,
+ struct dxghwqueue *hwqueue)
+{
+ int ret = 0;
+
+ down_write(&context->hwqueue_list_lock);
+ if (dxgcontext_is_active(context))
+ list_add_tail(&hwqueue->hwqueue_list_entry,
+ &context->hwqueue_list_head);
+ else
+ ret = -ENODEV;
+ up_write(&context->hwqueue_list_lock);
+ return ret;
+}
+
+void dxgcontext_remove_hwqueue(struct dxgcontext *context,
+ struct dxghwqueue *hwqueue)
+{
+ if (hwqueue->hwqueue_list_entry.next) {
+ list_del(&hwqueue->hwqueue_list_entry);
+ hwqueue->hwqueue_list_entry.next = NULL;
+ }
+}
+
+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *context,
+ struct dxghwqueue *hwqueue)
+{
+ down_write(&context->hwqueue_list_lock);
+ dxgcontext_remove_hwqueue(context, hwqueue);
+ up_write(&context->hwqueue_list_lock);
+}
+
+struct dxgallocation *dxgallocation_create(struct dxgprocess *process)
+{
+ struct dxgallocation *alloc = vzalloc(sizeof(struct dxgallocation));
+
+ if (alloc)
+ alloc->process = process;
+ return alloc;
+}
+
+void dxgallocation_stop(struct dxgallocation *alloc)
+{
+ if (alloc->pages) {
+ release_pages(alloc->pages, alloc->num_pages);
+ vfree(alloc->pages);
+ alloc->pages = NULL;
+ }
+ dxgprocess_ht_lock_exclusive_down(alloc->process);
+ if (alloc->cpu_address_mapped) {
+ dxg_unmap_iospace(alloc->cpu_address,
+ alloc->num_pages << PAGE_SHIFT);
+ alloc->cpu_address_mapped = false;
+ alloc->cpu_address = NULL;
+ alloc->cpu_address_refcount = 0;
+ }
+ dxgprocess_ht_lock_exclusive_up(alloc->process);
+}
+
+void dxgallocation_free_handle(struct dxgallocation *alloc)
+{
+ dxgprocess_ht_lock_exclusive_down(alloc->process);
+ if (alloc->handle_valid) {
+ hmgrtable_free_handle(&alloc->process->handle_table,
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ alloc->alloc_handle);
+ alloc->handle_valid = 0;
+ }
+ dxgprocess_ht_lock_exclusive_up(alloc->process);
+}
+
+void dxgallocation_destroy(struct dxgallocation *alloc)
+{
+ struct dxgprocess *process = alloc->process;
+ struct d3dkmt_destroyallocation2 args = { };
+
+ dxgallocation_stop(alloc);
+ if (alloc->resource_owner)
+ dxgresource_remove_alloc(alloc->owner.resource, alloc);
+ else if (alloc->owner.device)
+ dxgdevice_remove_alloc(alloc->owner.device, alloc);
+ dxgallocation_free_handle(alloc);
+ if (alloc->alloc_handle.v && !alloc->resource_owner) {
+ args.device = alloc->owner.device->handle;
+ args.alloc_count = 1;
+ dxgvmb_send_destroy_allocation(process,
+ alloc->owner.device,
+ &args, &alloc->alloc_handle);
+ }
+
+ if (alloc->priv_drv_data)
+ vfree(alloc->priv_drv_data);
+ if (alloc->cpu_address_mapped)
+ pr_err("Alloc IO space is mapped: %p", alloc);
+ vfree(alloc);
+}
+
+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device)
+{
+ struct dxgpagingqueue *pqueue;
+
+ pqueue = vzalloc(sizeof(*pqueue));
+ if (pqueue) {
+ pqueue->device = device;
+ pqueue->process = device->process;
+ pqueue->device_handle = device->handle;
+ dxgdevice_add_paging_queue(device, pqueue);
+ }
+ return pqueue;
+}
+
+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue)
+{
+ int ret;
+
+ if (pqueue->mapped_address) {
+ ret = dxg_unmap_iospace(pqueue->mapped_address, PAGE_SIZE);
+ dev_dbg(dxgglobaldev, "fence is unmapped %d %p",
+ ret, pqueue->mapped_address);
+ pqueue->mapped_address = NULL;
+ }
+}
+
+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue)
+{
+ struct dxgprocess *process = pqueue->process;
+
+ dev_dbg(dxgglobaldev, "%s %p %x\n", __func__, pqueue, pqueue->handle.v);
+
+ dxgpagingqueue_stop(pqueue);
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ if (pqueue->handle.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ pqueue->handle);
+ pqueue->handle.v = 0;
+ }
+ if (pqueue->syncobj_handle.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ pqueue->syncobj_handle);
+ pqueue->syncobj_handle.v = 0;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ if (pqueue->device)
+ dxgdevice_remove_paging_queue(pqueue);
+ vfree(pqueue);
+}
+
+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process,
+ struct dxgadapter *adapter)
+{
+ struct dxgprocess_adapter *adapter_info;
+
+ adapter_info = vzalloc(sizeof(*adapter_info));
+ if (adapter_info) {
+ if (kref_get_unless_zero(&adapter->adapter_kref) == 0) {
+ pr_err("failed to acquire adapter reference");
+ goto cleanup;
+ }
+ adapter_info->adapter = adapter;
+ adapter_info->process = process;
+ adapter_info->refcount = 1;
+ mutex_init(&adapter_info->device_list_mutex);
+ INIT_LIST_HEAD(&adapter_info->device_list_head);
+ list_add_tail(&adapter_info->process_adapter_list_entry,
+ &process->process_adapter_list_head);
+ dxgadapter_add_process(adapter, adapter_info);
+ }
+ return adapter_info;
+cleanup:
+ if (adapter_info)
+ vfree(adapter_info);
+ return NULL;
+}
+
+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info)
+{
+ struct dxgdevice *device;
+
+ mutex_lock(&adapter_info->device_list_mutex);
+ list_for_each_entry(device, &adapter_info->device_list_head,
+ device_list_entry) {
+ dxgdevice_stop(device);
+ }
+ mutex_unlock(&adapter_info->device_list_mutex);
+}
+
+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info)
+{
+ struct dxgdevice *device;
+
+ mutex_lock(&adapter_info->device_list_mutex);
+ while (!list_empty(&adapter_info->device_list_head)) {
+ device = list_first_entry(&adapter_info->device_list_head,
+ struct dxgdevice, device_list_entry);
+ list_del(&device->device_list_entry);
+ device->device_list_entry.next = NULL;
+ mutex_unlock(&adapter_info->device_list_mutex);
+ dxgvmb_send_flush_device(device,
+ DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE);
+ dxgdevice_destroy(device);
+ mutex_lock(&adapter_info->device_list_mutex);
+ }
+ mutex_unlock(&adapter_info->device_list_mutex);
+
+ dxgadapter_remove_process(adapter_info);
+ kref_put(&adapter_info->adapter->adapter_kref, dxgadapter_release);
+ list_del(&adapter_info->process_adapter_list_entry);
+ vfree(adapter_info);
+}
+
+/*
+ * Must be called when dxgglobal::process_adapter_mutex is held
+ */
+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter_info)
+{
+ dev_dbg(dxgglobaldev, "%s %p %d",
+ __func__, adapter_info, adapter_info->refcount);
+ adapter_info->refcount--;
+ if (adapter_info->refcount == 0)
+ dxgprocess_adapter_destroy(adapter_info);
+}
+
+int dxgprocess_adapter_add_device(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct dxgdevice *device)
+{
+ struct dxgprocess_adapter *entry;
+ struct dxgprocess_adapter *adapter_info = NULL;
+ int ret = 0;
+
+ dxgglobal_acquire_process_adapter_lock();
+
+ list_for_each_entry(entry, &process->process_adapter_list_head,
+ process_adapter_list_entry) {
+ if (entry->adapter == adapter) {
+ adapter_info = entry;
+ break;
+ }
+ }
+ if (adapter_info == NULL) {
+ pr_err("failed to find process adapter info\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ mutex_lock(&adapter_info->device_list_mutex);
+ list_add_tail(&device->device_list_entry,
+ &adapter_info->device_list_head);
+ device->adapter_info = adapter_info;
+ mutex_unlock(&adapter_info->device_list_mutex);
+
+cleanup:
+
+ dxgglobal_release_process_adapter_lock();
+ return ret;
+}
+
+void dxgprocess_adapter_remove_device(struct dxgdevice *device)
+{
+ dev_dbg(dxgglobaldev, "%s %p\n", __func__, device);
+ mutex_lock(&device->adapter_info->device_list_mutex);
+ if (device->device_list_entry.next) {
+ list_del(&device->device_list_entry);
+ device->device_list_entry.next = NULL;
+ }
+ mutex_unlock(&device->adapter_info->device_list_mutex);
+}
+
+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
+ struct dxgsyncobject *so)
+{
+ struct dxgsharedsyncobject *syncobj;
+
+ syncobj = vzalloc(sizeof(*syncobj));
+ if (syncobj) {
+ kref_init(&syncobj->ssyncobj_kref);
+ INIT_LIST_HEAD(&syncobj->shared_syncobj_list_head);
+ syncobj->adapter = adapter;
+ syncobj->type = so->type;
+ syncobj->monitored_fence = so->monitored_fence;
+ dxgadapter_add_shared_syncobj(adapter, syncobj);
+ kref_get(&adapter->adapter_kref);
+ init_rwsem(&syncobj->syncobj_list_lock);
+ mutex_init(&syncobj->fd_mutex);
+ }
+ return syncobj;
+}
+
+void dxgsharedsyncobj_release(struct kref *refcount)
+{
+ struct dxgsharedsyncobject *syncobj;
+
+ syncobj = container_of(refcount, struct dxgsharedsyncobject,
+ ssyncobj_kref);
+ dev_dbg(dxgglobaldev, "Destroying shared sync object %p", syncobj);
+ if (syncobj->adapter) {
+ dxgadapter_remove_shared_syncobj(syncobj->adapter,
+ syncobj);
+ kref_put(&syncobj->adapter->adapter_kref,
+ dxgadapter_release);
+ }
+ vfree(syncobj);
+}
+
+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *shared,
+ struct dxgsyncobject *syncobj)
+{
+ dev_dbg(dxgglobaldev, "%s 0x%p 0x%p", __func__, shared, syncobj);
+ kref_get(&shared->ssyncobj_kref);
+ down_write(&shared->syncobj_list_lock);
+ list_add(&syncobj->shared_syncobj_list_entry,
+ &shared->shared_syncobj_list_head);
+ syncobj->shared_owner = shared;
+ up_write(&shared->syncobj_list_lock);
+}
+
+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *shared,
+ struct dxgsyncobject *syncobj)
+{
+ dev_dbg(dxgglobaldev, "%s 0x%p", __func__, shared);
+ down_write(&shared->syncobj_list_lock);
+ list_del(&syncobj->shared_syncobj_list_entry);
+ up_write(&shared->syncobj_list_lock);
+}
+
+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct dxgadapter *adapter,
+ enum
+ d3dddi_synchronizationobject_type
+ type,
+ struct
+ d3dddi_synchronizationobject_flags
+ flags)
+{
+ struct dxgsyncobject *syncobj;
+
+ syncobj = vzalloc(sizeof(*syncobj));
+ if (syncobj == NULL)
+ goto cleanup;
+ syncobj->type = type;
+ syncobj->process = process;
+ switch (type) {
+ case _D3DDDI_MONITORED_FENCE:
+ case _D3DDDI_PERIODIC_MONITORED_FENCE:
+ syncobj->monitored_fence = 1;
+ break;
+ case _D3DDDI_CPU_NOTIFICATION:
+ syncobj->cpu_event = 1;
+ syncobj->host_event = vzalloc(sizeof(struct dxghostevent));
+ if (syncobj->host_event == NULL)
+ goto cleanup;
+ break;
+ default:
+ break;
+ }
+ if (flags.shared) {
+ syncobj->shared = 1;
+ if (!flags.nt_security_sharing) {
+ dev_err(dxgglobaldev,
+ "%s: nt_security_sharing must be set",
+ __func__);
+ goto cleanup;
+ }
+ }
+
+ kref_init(&syncobj->syncobj_kref);
+
+ if (syncobj->monitored_fence) {
+ syncobj->device = device;
+ syncobj->device_handle = device->handle;
+ kref_get(&device->device_kref);
+ dxgdevice_add_syncobj(device, syncobj);
+ } else {
+ dxgadapter_add_syncobj(adapter, syncobj);
+ }
+ syncobj->adapter = adapter;
+ kref_get(&adapter->adapter_kref);
+
+ dev_dbg(dxgglobaldev, "%s 0x%p\n", __func__, syncobj);
+ return syncobj;
+cleanup:
+	if (syncobj && syncobj->host_event)
+		vfree(syncobj->host_event);
+	if (syncobj)
+		vfree(syncobj);
+ return NULL;
+}
+
+void dxgsyncobject_destroy(struct dxgprocess *process,
+ struct dxgsyncobject *syncobj)
+{
+ int destroyed;
+ struct dxghosteventcpu *host_event;
+
+ dev_dbg(dxgglobaldev, "%s 0x%p", __func__, syncobj);
+
+ dxgsyncobject_stop(syncobj);
+
+ destroyed = test_and_set_bit(0, &syncobj->flags);
+ if (!destroyed) {
+ dev_dbg(dxgglobaldev, "Deleting handle: %x", syncobj->handle.v);
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ if (syncobj->handle.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ syncobj->handle);
+ syncobj->handle.v = 0;
+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release);
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (syncobj->cpu_event) {
+ host_event = syncobj->host_event;
+ if (host_event->cpu_event) {
+ eventfd_ctx_put(host_event->cpu_event);
+ if (host_event->hdr.event_id)
+ dxgglobal_remove_host_event(
+ &host_event->hdr);
+ host_event->cpu_event = NULL;
+ }
+ }
+ if (syncobj->monitored_fence)
+ dxgdevice_remove_syncobj(syncobj);
+ else
+ dxgadapter_remove_syncobj(syncobj);
+ if (syncobj->adapter) {
+ kref_put(&syncobj->adapter->adapter_kref,
+ dxgadapter_release);
+ syncobj->adapter = NULL;
+ }
+ }
+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release);
+}
+
+void dxgsyncobject_stop(struct dxgsyncobject *syncobj)
+{
+ int stopped = test_and_set_bit(1, &syncobj->flags);
+
+ if (!stopped) {
+ dev_dbg(dxgglobaldev, "stopping");
+ if (syncobj->monitored_fence) {
+ if (syncobj->mapped_address) {
+ int ret =
+ dxg_unmap_iospace(syncobj->mapped_address,
+ PAGE_SIZE);
+
+ (void)ret;
+ dev_dbg(dxgglobaldev, "unmap fence %d %p\n",
+ ret, syncobj->mapped_address);
+ syncobj->mapped_address = NULL;
+ }
+ }
+ }
+}
+
+void dxgsyncobject_release(struct kref *refcount)
+{
+ struct dxgsyncobject *syncobj;
+
+ syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref);
+ if (syncobj->shared_owner) {
+ dxgsharedsyncobj_remove_syncobj(syncobj->shared_owner,
+ syncobj);
+ kref_put(&syncobj->shared_owner->ssyncobj_kref,
+ dxgsharedsyncobj_release);
+ }
+ if (syncobj->host_event)
+ vfree(syncobj->host_event);
+ vfree(syncobj);
+}
+
+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *context)
+{
+ struct dxgprocess *process = context->device->process;
+ struct dxghwqueue *hwqueue = vzalloc(sizeof(*hwqueue));
+
+ if (hwqueue) {
+ kref_init(&hwqueue->hwqueue_kref);
+ hwqueue->context = context;
+ hwqueue->process = process;
+ hwqueue->device_handle = context->device->handle;
+ if (dxgcontext_add_hwqueue(context, hwqueue) < 0) {
+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release);
+ hwqueue = NULL;
+ } else {
+ kref_get(&context->context_kref);
+ }
+ }
+ return hwqueue;
+}
+
+void dxghwqueue_destroy(struct dxgprocess *process, struct dxghwqueue *hwqueue)
+{
+ dev_dbg(dxgglobaldev, "%s %p\n", __func__, hwqueue);
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ if (hwqueue->handle.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ hwqueue->handle);
+ hwqueue->handle.v = 0;
+ }
+ if (hwqueue->progress_fence_sync_object.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ hwqueue->progress_fence_sync_object);
+ hwqueue->progress_fence_sync_object.v = 0;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (hwqueue->progress_fence_mapped_address) {
+ dxg_unmap_iospace(hwqueue->progress_fence_mapped_address,
+ PAGE_SIZE);
+ hwqueue->progress_fence_mapped_address = NULL;
+ }
+ dxgcontext_remove_hwqueue_safe(hwqueue->context, hwqueue);
+
+ kref_put(&hwqueue->context->context_kref, dxgcontext_release);
+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release);
+}
+
+void dxghwqueue_release(struct kref *refcount)
+{
+ struct dxghwqueue *hwqueue;
+
+ hwqueue = container_of(refcount, struct dxghwqueue, hwqueue_kref);
+ vfree(hwqueue);
+}
diff --git a/virtio_dxgkrnl/dxgglobal.c b/virtio_dxgkrnl/dxgglobal.c
new file mode 100644
index 0000000..eb91f4a
--- /dev/null
+++ b/virtio_dxgkrnl/dxgglobal.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Interface with Linux kernel and the VM bus driver
+ *
+ */
+
+#include <linux/eventfd.h>
+#include <linux/sync_file.h>
+
+#include "dxgkrnl.h"
+#include "dxgsyncfile.h"
+#include "dxgvmbus.h"
+
+struct dxgglobal *dxgglobal;
+struct device *dxgglobaldev;
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+#undef dev_fmt
+#define dev_fmt(fmt) "dxgk: " fmt
+
+//
+// Interface from dxgglobal
+//
+
+struct vmbus_channel *dxgglobal_get_vmbus(void)
+{
+ return dxgglobal->channel.channel;
+}
+
+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void)
+{
+ return &dxgglobal->channel;
+}
+
+int dxgglobal_acquire_channel_lock(void)
+{
+ down_read(&dxgglobal->channel_lock);
+ if ((dxgglobal->channel.channel == NULL) && (dxgglobal->vdxgkrnl == NULL)) {
+ pr_err("Failed to acquire global channel lock");
+ return -ENODEV;
+ } else {
+ return 0;
+ }
+}
+
+void dxgglobal_release_channel_lock(void)
+{
+ up_read(&dxgglobal->channel_lock);
+}
+
+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state)
+{
+ if (state == DXGLOCK_EXCL)
+ down_write(&dxgglobal->adapter_list_lock);
+ else
+ down_read(&dxgglobal->adapter_list_lock);
+}
+
+void dxgglobal_release_adapter_list_lock(enum dxglockstate state)
+{
+ if (state == DXGLOCK_EXCL)
+ up_write(&dxgglobal->adapter_list_lock);
+ else
+ up_read(&dxgglobal->adapter_list_lock);
+}
+
+struct dxgadapter *find_pci_adapter(struct pci_dev *dev)
+{
+ struct dxgadapter *entry;
+ struct dxgadapter *adapter = NULL;
+
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL);
+
+ list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (dev == entry->pci_dev) {
+ adapter = entry;
+ break;
+ }
+ }
+
+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL);
+ return adapter;
+}
+
+void dxgglobal_add_host_event(struct dxghostevent *event)
+{
+ spin_lock_irq(&dxgglobal->host_event_list_mutex);
+ list_add_tail(&event->host_event_list_entry,
+ &dxgglobal->host_event_list_head);
+ spin_unlock_irq(&dxgglobal->host_event_list_mutex);
+}
+
+void dxgglobal_remove_host_event(struct dxghostevent *event)
+{
+ spin_lock_irq(&dxgglobal->host_event_list_mutex);
+ if (event->host_event_list_entry.next != NULL) {
+ list_del(&event->host_event_list_entry);
+ event->host_event_list_entry.next = NULL;
+ }
+ spin_unlock_irq(&dxgglobal->host_event_list_mutex);
+}
+
+void signal_host_cpu_event(struct dxghostevent *eventhdr)
+{
+ struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr;
+
+ if (event->remove_from_list ||
+ event->destroy_after_signal) {
+ list_del(&eventhdr->host_event_list_entry);
+ eventhdr->host_event_list_entry.next = NULL;
+ }
+ if (event->cpu_event) {
+ dev_dbg(dxgglobaldev, "signal cpu event\n");
+ eventfd_signal(event->cpu_event, 1);
+ if (event->destroy_after_signal)
+ eventfd_ctx_put(event->cpu_event);
+ } else {
+ dev_dbg(dxgglobaldev, "signal completion\n");
+ complete(event->completion_event);
+ }
+ if (event->destroy_after_signal) {
+ dev_dbg(dxgglobaldev, "destroying event %p\n",
+ event);
+ vfree(event);
+ }
+}
+
+void signal_dma_fence(struct dxghostevent *eventhdr)
+{
+ struct dxgsyncpoint *event = (struct dxgsyncpoint *)eventhdr;
+
+ event->fence_value++;
+ list_del(&eventhdr->host_event_list_entry);
+ dma_fence_signal(&event->base);
+}
+
+void set_guest_data(struct dxgkvmb_command_host_to_vm *packet,
+ u32 packet_length)
+{
+ struct dxgkvmb_command_setguestdata *command = (void *)packet;
+
+ dev_dbg(dxgglobaldev, "%s: %d %d %p %p", __func__,
+ command->data_type,
+ command->data32,
+ command->guest_pointer,
+ &dxgglobal->device_state_counter);
+ if (command->data_type == SETGUESTDATA_DATATYPE_DWORD &&
+ command->guest_pointer == &dxgglobal->device_state_counter &&
+ command->data32 != 0) {
+ atomic_inc(&dxgglobal->device_state_counter);
+ }
+}
+
+void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet,
+ u32 packet_length)
+{
+ struct dxgkvmb_command_signalguestevent *command = (void *)packet;
+
+ if (packet_length < sizeof(struct dxgkvmb_command_signalguestevent)) {
+ pr_err("invalid packet size");
+ return;
+ }
+ if (command->event == 0) {
+ pr_err("invalid event pointer");
+ return;
+ }
+ dxgglobal_signal_host_event(command->event);
+}
+
+void dxgglobal_signal_host_event(u64 event_id)
+{
+ struct dxghostevent *event;
+ unsigned long flags;
+
+ dev_dbg(dxgglobaldev, "%s %lld\n", __func__, event_id);
+
+ spin_lock_irqsave(&dxgglobal->host_event_list_mutex, flags);
+ list_for_each_entry(event, &dxgglobal->host_event_list_head,
+ host_event_list_entry) {
+ if (event->event_id == event_id) {
+ dev_dbg(dxgglobaldev, "found event to signal %lld\n",
+ event_id);
+ if (event->event_type == dxghostevent_cpu_event)
+ signal_host_cpu_event(event);
+ else if (event->event_type == dxghostevent_dma_fence)
+ signal_dma_fence(event);
+ else
+ pr_err("Unknown host event type");
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dxgglobal->host_event_list_mutex, flags);
+ dev_dbg(dxgglobaldev, "dxgglobal_signal_host_event_end %lld\n",
+ event_id);
+}
+
+struct dxghostevent *dxgglobal_get_host_event(u64 event_id)
+{
+ struct dxghostevent *entry;
+ struct dxghostevent *event = NULL;
+
+ spin_lock_irq(&dxgglobal->host_event_list_mutex);
+ list_for_each_entry(entry, &dxgglobal->host_event_list_head,
+ host_event_list_entry) {
+ if (entry->event_id == event_id) {
+ list_del(&entry->host_event_list_entry);
+ entry->host_event_list_entry.next = NULL;
+ event = entry;
+ break;
+ }
+ }
+ spin_unlock_irq(&dxgglobal->host_event_list_mutex);
+ return event;
+}
+
+u64 dxgglobal_new_host_event_id(void)
+{
+ return atomic64_inc_return(&dxgglobal->host_event_id);
+}
+
+void dxgglobal_acquire_process_adapter_lock(void)
+{
+ mutex_lock(&dxgglobal->process_adapter_mutex);
+}
+
+void dxgglobal_release_process_adapter_lock(void)
+{
+ mutex_unlock(&dxgglobal->process_adapter_mutex);
+}
+
+int dxgglobal_create_adapter(struct pci_dev *dev, struct winluid guid,
+ struct winluid host_vgpu_luid)
+{
+ struct dxgadapter *adapter;
+ int ret = 0;
+
+ adapter = vzalloc(sizeof(struct dxgadapter));
+ if (adapter == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ adapter->adapter_state = DXGADAPTER_STATE_WAITING_VMBUS;
+ adapter->host_vgpu_luid = host_vgpu_luid;
+ kref_init(&adapter->adapter_kref);
+ init_rwsem(&adapter->core_lock);
+
+ INIT_LIST_HEAD(&adapter->adapter_process_list_head);
+ INIT_LIST_HEAD(&adapter->shared_resource_list_head);
+ INIT_LIST_HEAD(&adapter->adapter_shared_syncobj_list_head);
+ INIT_LIST_HEAD(&adapter->syncobj_list_head);
+ init_rwsem(&adapter->shared_resource_list_lock);
+ adapter->pci_dev = dev;
+ adapter->luid = guid;
+
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL);
+
+ list_add_tail(&adapter->adapter_list_entry,
+ &dxgglobal->adapter_list_head);
+ dxgglobal->num_adapters++;
+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL);
+
+ dev_dbg(dxgglobaldev, "new adapter added %p %x-%x\n", adapter,
+ adapter->luid.a, adapter->luid.b);
+cleanup:
+ dev_dbg(dxgglobaldev, "%s end: %d", __func__, ret);
+ return ret;
+}
+
+void dxgglobal_start_adapters(void)
+{
+ struct dxgadapter *adapter;
+
+ if ((dxgglobal->hdev == NULL) && (dxgglobal->vdxgkrnl == NULL)) {
+ dev_dbg(dxgglobaldev, "Global channel is not ready");
+ return;
+ }
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL);
+ list_for_each_entry(adapter, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (adapter->adapter_state == DXGADAPTER_STATE_WAITING_VMBUS)
+ dxgadapter_start(adapter);
+ }
+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL);
+}
+
+void dxgglobal_stop_adapters(void)
+{
+ struct dxgadapter *adapter;
+
+ if (dxgglobal->hdev == NULL) {
+ dev_dbg(dxgglobaldev, "Global channel is not ready");
+ return;
+ }
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL);
+ list_for_each_entry(adapter, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE)
+ dxgadapter_stop(adapter);
+ }
+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL);
+}
+
+/*
+ * File operations
+ */
+
+static struct dxgprocess *dxgglobal_get_current_process(void)
+{
+ /*
+ * Find the DXG process for the current process.
+ * A new process is created if necessary.
+ */
+ struct dxgprocess *process = NULL;
+ struct dxgprocess *entry = NULL;
+
+ mutex_lock(&dxgglobal->plistmutex);
+ list_for_each_entry(entry, &dxgglobal->plisthead, plistentry) {
+ /* All threads of a process have the same thread group ID */
+ if (entry->process->tgid == current->tgid) {
+ if (kref_get_unless_zero(&entry->process_kref)) {
+ process = entry;
+ dev_dbg(dxgglobaldev, "found dxgprocess");
+ } else {
+ dev_dbg(dxgglobaldev, "process is destroyed");
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dxgglobal->plistmutex);
+
+ if (process == NULL)
+ process = dxgprocess_create();
+
+ return process;
+}
+
+static int dxgk_open(struct inode *n, struct file *f)
+{
+ int ret = 0;
+ struct dxgprocess *process;
+
+ dev_dbg(dxgglobaldev, "%s %p %d %d",
+ __func__, f, current->pid, current->tgid);
+
+
+ /* Find/create a dxgprocess structure for this process */
+ process = dxgglobal_get_current_process();
+
+ if (process) {
+ f->private_data = process;
+ } else {
+ dev_dbg(dxgglobaldev, "cannot create dxgprocess for open\n");
+ ret = -EBADF;
+ }
+
+ dev_dbg(dxgglobaldev, "%s end %x", __func__, ret);
+ return ret;
+}
+
+static int dxgk_release(struct inode *n, struct file *f)
+{
+ struct dxgprocess *process;
+
+ process = (struct dxgprocess *)f->private_data;
+ dev_dbg(dxgglobaldev, "%s %p, %p", __func__, f, process);
+
+ if (process == NULL)
+ return -EINVAL;
+
+ kref_put(&process->process_kref, dxgprocess_release);
+
+ f->private_data = NULL;
+ return 0;
+}
+
+static ssize_t dxgk_read(struct file *f, char __user *s, size_t len,
+ loff_t *o)
+{
+ dev_dbg(dxgglobaldev, "file read\n");
+ return 0;
+}
+
+static ssize_t dxgk_write(struct file *f, const char __user *s, size_t len,
+ loff_t *o)
+{
+ dev_dbg(dxgglobaldev, "file write\n");
+ return len;
+}
+
+const struct file_operations dxgk_fops = {
+ .owner = THIS_MODULE,
+ .open = dxgk_open,
+ .release = dxgk_release,
+ .compat_ioctl = dxgk_compat_ioctl,
+ .unlocked_ioctl = dxgk_unlocked_ioctl,
+ .write = dxgk_write,
+ .read = dxgk_read,
+};
diff --git a/virtio_dxgkrnl/dxgglobal.h b/virtio_dxgkrnl/dxgglobal.h
new file mode 100644
index 0000000..6067bf0
--- /dev/null
+++ b/virtio_dxgkrnl/dxgglobal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Interface with Linux kernel and the VM bus driver
+ *
+ */
+
+extern const struct file_operations dxgk_fops;
+
+struct vmbus_channel *dxgglobal_get_vmbus(void);
+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void);
+int dxgglobal_acquire_channel_lock(void);
+int dxgglobal_create_adapter(struct pci_dev *dev, struct winluid guid, struct winluid host_vgpu_luid);
+void dxgglobal_start_adapters(void);
+void dxgglobal_stop_adapters(void);
+void set_guest_data(struct dxgkvmb_command_host_to_vm *packet,
+ u32 packet_length);
+void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet,
+ u32 packet_length);
diff --git a/virtio_dxgkrnl/dxgkrnl.h b/virtio_dxgkrnl/dxgkrnl.h
new file mode 100644
index 0000000..bf3bfbe
--- /dev/null
+++ b/virtio_dxgkrnl/dxgkrnl.h
@@ -0,0 +1,972 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Headers for internal objects
+ *
+ */
+
+#ifndef _DXGKRNL_H
+#define _DXGKRNL_H
+
+#include <linux/uuid.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/refcount.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+
+struct dxgprocess;
+struct dxgadapter;
+struct dxgdevice;
+struct dxgcontext;
+struct dxgallocation;
+struct dxgresource;
+struct dxgsharedresource;
+struct dxgsyncobject;
+struct dxgsharedsyncobject;
+struct dxghwqueue;
+
+#include "misc.h"
+#include "hmgr.h"
+#include "include/uapi/d3dkmthk.h"
+
+struct dxgk_device_types {
+ u32 post_device:1;
+ u32 post_device_certain:1;
+ u32 software_device:1;
+ u32 soft_gpu_device:1;
+ u32 warp_device:1;
+ u32 bdd_device:1;
+ u32 support_miracast:1;
+ u32 mismatched_lda:1;
+ u32 indirect_display_device:1;
+ u32 xbox_one_device:1;
+ u32 child_id_support_dwm_clone:1;
+ u32 child_id_support_dwm_clone2:1;
+ u32 has_internal_panel:1;
+ u32 rfx_vgpu_device:1;
+ u32 virtual_render_device:1;
+ u32 support_preserve_boot_display:1;
+ u32 is_uefi_frame_buffer:1;
+ u32 removable_device:1;
+ u32 virtual_monitor_device:1;
+};
+
+enum dxgdevice_flushschedulerreason {
+ DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE = 4,
+};
+
+enum dxgobjectstate {
+ DXGOBJECTSTATE_CREATED,
+ DXGOBJECTSTATE_ACTIVE,
+ DXGOBJECTSTATE_STOPPED,
+ DXGOBJECTSTATE_DESTROYED,
+};
+
+struct dxgvmbuschannel {
+ struct vmbus_channel *channel;
+ struct hv_device *hdev;
+ struct dxgadapter *adapter;
+ spinlock_t packet_list_mutex;
+ struct list_head packet_list_head;
+ struct kmem_cache *packet_cache;
+ atomic64_t packet_request_id;
+};
+
+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev);
+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch);
+void dxgvmbuschannel_receive(void *ctx);
+int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel,
+ void *command,
+ u32 cmd_size);
+int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel,
+ void *command,
+ u32 cmd_size,
+ void *result,
+ u32 result_size);
+
+struct dxgpagingqueue {
+ struct dxgdevice *device;
+ struct dxgprocess *process;
+ struct list_head pqueue_list_entry;
+ struct d3dkmthandle device_handle;
+ struct d3dkmthandle handle;
+ struct d3dkmthandle syncobj_handle;
+ void *mapped_address;
+};
+
+/*
+ * The structure describes an event, which will be signaled by
+ * a message from host.
+ */
+enum dxghosteventtype {
+ dxghostevent_cpu_event = 1,
+ dxghostevent_dma_fence = 2
+};
+
+struct dxghostevent {
+ struct list_head host_event_list_entry;
+ u64 event_id;
+ enum dxghosteventtype event_type;
+};
+
+struct dxghosteventcpu {
+ struct dxghostevent hdr;
+ struct dxgprocess *process;
+ struct eventfd_ctx *cpu_event;
+ struct completion *completion_event;
+ bool destroy_after_signal;
+ bool remove_from_list;
+};
+
+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device);
+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue);
+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue);
+
+/*
+ * This is a GPU synchronization object, which is used to synchronize
+ * execution between GPU contexts/hardware queues or to track GPU execution
+ * progress.
+ * A dxgsyncobject is created when somebody creates a syncobject or opens a
+ * shared syncobject.
+ * A syncobject belongs to an adapter, unless it is a cross-adapter object.
+ * Cross-adapter syncobjects are currently not implemented.
+ *
+ * D3DDDI_MONITORED_FENCE and D3DDDI_PERIODIC_MONITORED_FENCE are called
+ * "device" syncobjects, because they belong to a device (dxgdevice).
+ * Device syncobjects are inserted into a list in dxgdevice.
+ *
+ * A syncobject can be "shared", meaning that it can be opened by many
+ * processes.
+ *
+ * Shared syncobjects are inserted into a list in their owner
+ * (dxgsharedsyncobject).
+ * A syncobject can be shared by using a global handle or by using an
+ * "NT security handle".
+ * When global handle sharing is used, the handle is created during object
+ * creation.
+ * When "NT security" is used, the handle for sharing is created by calling
+ * dxgk_share_objects. On Linux an "NT handle" is represented by a file
+ * descriptor. The FD points to a dxgsharedsyncobject.
+ */
+struct dxgsyncobject {
+ struct kref syncobj_kref;
+ enum d3dddi_synchronizationobject_type type;
+ /*
+ * List entry in dxgdevice for device sync objects.
+ * List entry in dxgadapter for other objects
+ */
+ struct list_head syncobj_list_entry;
+	/* List entry in the dxgsharedsyncobject object for shared syncobjects */
+ struct list_head shared_syncobj_list_entry;
+	/* Adapter the syncobject belongs to. NULL for stopped sync objects. */
+ struct dxgadapter *adapter;
+ /*
+ * Pointer to the device, which was used to create the object.
+	 * This is NULL for non-device syncobjects
+ */
+ struct dxgdevice *device;
+ struct dxgprocess *process;
+ /* Used by D3DDDI_CPU_NOTIFICATION objects */
+ struct dxghosteventcpu *host_event;
+ /* Owner object for shared syncobjects */
+ struct dxgsharedsyncobject *shared_owner;
+ /* CPU virtual address of the fence value for "device" syncobjects */
+ void *mapped_address;
+ /* Handle in the process handle table */
+ struct d3dkmthandle handle;
+ /* Cached handle of the device. Used to avoid device dereference. */
+ struct d3dkmthandle device_handle;
+ union {
+ struct {
+ /* Must be the first bit */
+ u32 destroyed:1;
+ /* Must be the second bit */
+ u32 stopped:1;
+ /* device syncobject */
+ u32 monitored_fence:1;
+ u32 cpu_event:1;
+ u32 shared:1;
+ u32 reserved:27;
+ };
+ long flags;
+ };
+};
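+
+/*
+ * Sharing sketch (illustrative only, not verbatim driver code; error
+ * handling omitted). A shared syncobject and its per-process views are tied
+ * together with the helpers declared below:
+ *
+ *	so = dxgsyncobject_create(process, device, adapter, type, flags);
+ *	shared = dxgsharedsyncobj_create(adapter, so);
+ *	dxgsharedsyncobj_add_syncobj(shared, so);
+ *
+ * Each process that opens the object adds its own dxgsyncobject to the
+ * shared owner; the owner is released through its kref once the last
+ * dxgsyncobject is removed with dxgsharedsyncobj_remove_syncobj().
+ */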
+
+/*
+ * The structure defines an offered vGPU VM bus channel.
+ */
+struct dxgvgpuchannel {
+ struct list_head vgpu_ch_list_entry;
+ struct winluid adapter_luid;
+ struct hv_device *hdev;
+};
+
+/*
+ * The object is used as the parent of all sync objects created for a shared
+ * syncobject. When a shared syncobject is created without NT security, the
+ * handle in the global handle table will point to this object.
+ */
+struct dxgsharedsyncobject {
+ struct kref ssyncobj_kref;
+ /* Referenced by file descriptors */
+ int host_shared_handle_nt_reference;
+ /* Corresponding handle in the host global handle table */
+ struct d3dkmthandle host_shared_handle;
+ /*
+ * When the sync object is shared by NT handle, this is the
+ * corresponding handle in the host
+ */
+ struct d3dkmthandle host_shared_handle_nt;
+ /* Protects access to host_shared_handle_nt */
+ struct mutex fd_mutex;
+ struct rw_semaphore syncobj_list_lock;
+ struct list_head shared_syncobj_list_head;
+ struct list_head adapter_shared_syncobj_list_entry;
+ struct dxgadapter *adapter;
+ enum d3dddi_synchronizationobject_type type;
+ u32 monitored_fence:1;
+};
+
+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
+ struct dxgsyncobject
+ *syncobj);
+void dxgsharedsyncobj_release(struct kref *refcount);
+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
+ struct dxgsyncobject *syncobj);
+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
+ struct dxgsyncobject *syncobj);
+
+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct dxgadapter *adapter,
+ enum
+ d3dddi_synchronizationobject_type
+ type,
+ struct
+ d3dddi_synchronizationobject_flags
+ flags);
+void dxgsyncobject_destroy(struct dxgprocess *process,
+ struct dxgsyncobject *syncobj);
+void dxgsyncobject_stop(struct dxgsyncobject *syncobj);
+void dxgsyncobject_release(struct kref *refcount);
+
+extern struct device *dxgglobaldev;
+
+/*
+ * device_state_counter - incremented every time the execution state of
+ * a DXGDEVICE is changed in the host. Used to optimize access to the
+ * device execution state.
+ */
+struct dxgglobal {
+ struct dxgvmbuschannel channel;
+ struct delayed_work dwork;
+ struct hv_device *hdev;
+ struct virtio_dxgkrnl *vdxgkrnl;
+ u32 num_adapters;
+ u32 vmbus_ver; /* Interface version */
+ atomic_t device_state_counter;
+ struct resource *mem;
+ u64 mmiospace_base;
+ u64 mmiospace_size;
+ struct miscdevice dxgdevice;
+ struct mutex device_mutex;
+
+ /* list of created processes */
+ struct list_head plisthead;
+ struct mutex plistmutex;
+
+ /* list of created adapters */
+ struct list_head adapter_list_head;
+ struct rw_semaphore adapter_list_lock;
+
+ /* List of all current threads for lock order tracking. */
+ struct mutex thread_info_mutex;
+ struct list_head thread_info_list_head;
+
+ /*
+ * List of the vGPU VM bus channels (dxgvgpuchannel)
+ * Protected by device_mutex
+ */
+ struct list_head vgpu_ch_list_head;
+
+	/* protects access to the global VM bus channel */
+ struct rw_semaphore channel_lock;
+
+ /* protects the dxgprocess_adapter lists */
+ struct mutex process_adapter_mutex;
+
+ /* list of events, waiting to be signaled by the host */
+ struct list_head host_event_list_head;
+ spinlock_t host_event_list_mutex;
+ atomic64_t host_event_id;
+
+ /* Handle table for shared objects */
+ struct hmgrtable handle_table;
+
+ bool dxg_dev_initialized;
+ bool vmbus_registered;
+ bool pci_registered;
+ bool global_channel_initialized;
+ bool async_msg_enabled;
+ bool map_guest_pages_enabled;
+};
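+
+/*
+ * Illustrative note on device_state_counter (an assumption, not verbatim
+ * driver code): a consumer that caches a device execution state can treat
+ * the cache as stale when its saved counter no longer matches the global
+ * one, e.g.
+ *
+ *	if (device->execution_state_counter !=
+ *	    atomic_read(&dxgglobal->device_state_counter))
+ *		;	// re-query the execution state from the host
+ */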
+
+extern struct dxgglobal *dxgglobal;
+
+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state);
+void dxgglobal_release_adapter_list_lock(enum dxglockstate state);
+int dxgglobal_init_global_channel(void);
+void dxgglobal_destroy_global_channel(void);
+struct vmbus_channel *dxgglobal_get_vmbus(void);
+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void);
+void dxgglobal_acquire_process_adapter_lock(void);
+void dxgglobal_release_process_adapter_lock(void);
+void dxgglobal_add_host_event(struct dxghostevent *hostevent);
+void dxgglobal_remove_host_event(struct dxghostevent *hostevent);
+u64 dxgglobal_new_host_event_id(void);
+void dxgglobal_signal_host_event(u64 event_id);
+struct dxghostevent *dxgglobal_get_host_event(u64 event_id);
+int dxgglobal_acquire_channel_lock(void);
+void dxgglobal_release_channel_lock(void);
+
+/*
+ * Describes adapter information for each process
+ */
+struct dxgprocess_adapter {
+ /* Entry in dxgadapter::adapter_process_list_head */
+ struct list_head adapter_process_list_entry;
+ /* Entry in dxgprocess::process_adapter_list_head */
+ struct list_head process_adapter_list_entry;
+ /* List of all dxgdevice objects created for the process on adapter */
+ struct list_head device_list_head;
+ struct mutex device_list_mutex;
+ struct dxgadapter *adapter;
+ struct dxgprocess *process;
+ int refcount;
+};
+
+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process,
+ struct dxgadapter
+ *adapter);
+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter);
+int dxgprocess_adapter_add_device(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct dxgdevice *device);
+void dxgprocess_adapter_remove_device(struct dxgdevice *device);
+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info);
+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info);
+
+struct dxgprocess {
+ /*
+ * Process list entry in dxgglobal.
+ * Protected by the dxgglobal->plistmutex.
+ */
+ struct list_head plistentry;
+ struct task_struct *process;
+ pid_t pid;
+ pid_t tgid;
+	/* how many times the process was opened */
+ struct kref process_kref;
+ /*
+	 * This handle table is used for all objects except dxgadapter.
+	 * The handle table lock order is higher than the local_handle_table
+	 * lock.
+ */
+ struct hmgrtable handle_table;
+ /*
+ * This handle table is used for dxgadapter objects.
+ * The handle table lock order is lowest.
+ */
+ struct hmgrtable local_handle_table;
+ struct d3dkmthandle host_handle;
+
+ /* List of opened adapters (dxgprocess_adapter) */
+ struct list_head process_adapter_list_head;
+
+ struct hmgrtable *test_handle_table[2];
+ struct mutex process_mutex;
+};
+
+struct dxgprocess *dxgprocess_create(void);
+void dxgprocess_destroy(struct dxgprocess *process);
+void dxgprocess_release(struct kref *refcount);
+int dxgprocess_open_adapter(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle *handle);
+int dxgprocess_close_adapter(struct dxgprocess *process,
+ struct d3dkmthandle handle);
+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process,
+ struct d3dkmthandle handle);
+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process,
+ struct d3dkmthandle handle,
+ bool error_on_missing);
+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process,
+ struct d3dkmthandle handle);
+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process,
+ enum hmgrentry_type t,
+ struct d3dkmthandle h);
+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process);
+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process);
+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process);
+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process);
+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess
+ *process,
+ struct dxgadapter
+ *adapter);
+
+enum dxgadapter_state {
+ DXGADAPTER_STATE_ACTIVE = 0,
+ DXGADAPTER_STATE_STOPPED = 1,
+ DXGADAPTER_STATE_WAITING_VMBUS = 2,
+};
+
+/*
+ * This object represents the graphics adapter.
+ * Objects which take a reference on the adapter:
+ * - dxgglobal
+ * - dxgdevice
+ * - adapter handle (struct d3dkmthandle)
+ */
+struct dxgadapter {
+ struct rw_semaphore core_lock;
+ struct kref adapter_kref;
+ /* Entry in the list of adapters in dxgglobal */
+ struct list_head adapter_list_entry;
+ /* The list of dxgprocess_adapter entries */
+ struct list_head adapter_process_list_head;
+ /* List of all dxgsharedresource objects */
+ struct list_head shared_resource_list_head;
+ /* List of all dxgsharedsyncobject objects */
+ struct list_head adapter_shared_syncobj_list_head;
+ /* List of all non-device dxgsyncobject objects */
+ struct list_head syncobj_list_head;
+ /* This lock protects shared resource and syncobject lists */
+ struct rw_semaphore shared_resource_list_lock;
+ struct pci_dev *pci_dev;
+ struct hv_device *hv_dev;
+ struct dxgvmbuschannel channel;
+ struct d3dkmthandle host_handle;
+ enum dxgadapter_state adapter_state;
+ struct winluid host_adapter_luid;
+ struct winluid host_vgpu_luid;
+ struct winluid luid; /* VM bus channel luid */
+ u16 device_description[80];
+ u16 device_instance_id[WIN_MAX_PATH];
+ bool stopping_adapter;
+};
+
+int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev);
+bool dxgadapter_is_active(struct dxgadapter *adapter);
+void dxgadapter_start(struct dxgadapter *adapter);
+void dxgadapter_stop(struct dxgadapter *adapter);
+void dxgadapter_release(struct kref *refcount);
+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter);
+void dxgadapter_release_lock_shared(struct dxgadapter *adapter);
+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter);
+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter);
+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter);
+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *so);
+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *so);
+void dxgadapter_add_syncobj(struct dxgadapter *adapter,
+ struct dxgsyncobject *so);
+void dxgadapter_remove_syncobj(struct dxgsyncobject *so);
+void dxgadapter_add_process(struct dxgadapter *adapter,
+ struct dxgprocess_adapter *process_info);
+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info);
+
+/*
+ * This object represents a device.
+ * The following objects take a reference on the device:
+ * - dxgcontext
+ * - device handle (struct d3dkmthandle)
+ */
+struct dxgdevice {
+ enum dxgobjectstate object_state;
+ /* Device takes reference on the adapter */
+ struct dxgadapter *adapter;
+ struct dxgprocess_adapter *adapter_info;
+ struct dxgprocess *process;
+	/* Entry in the dxgprocess_adapter device list */
+ struct list_head device_list_entry;
+ struct kref device_kref;
+	/* Protects destruction of the device object */
+ struct rw_semaphore device_lock;
+ struct rw_semaphore context_list_lock;
+ struct list_head context_list_head;
+ /* List of device allocations */
+ struct rw_semaphore alloc_list_lock;
+ struct list_head alloc_list_head;
+ struct list_head resource_list_head;
+ /* List of paging queues. Protected by process handle table lock. */
+ struct list_head pqueue_list_head;
+ struct list_head syncobj_list_head;
+ struct d3dkmthandle handle;
+ enum d3dkmt_deviceexecution_state execution_state;
+ int execution_state_counter;
+ u32 handle_valid;
+};
+
+struct dxgdevice *dxgdevice_create(struct dxgadapter *a, struct dxgprocess *p);
+void dxgdevice_destroy(struct dxgdevice *device);
+void dxgdevice_stop(struct dxgdevice *device);
+int dxgdevice_acquire_lock_shared(struct dxgdevice *dev);
+void dxgdevice_release_lock_shared(struct dxgdevice *dev);
+void dxgdevice_release(struct kref *refcount);
+void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx);
+void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx);
+void dxgdevice_add_alloc(struct dxgdevice *dev, struct dxgallocation *a);
+void dxgdevice_remove_alloc(struct dxgdevice *dev, struct dxgallocation *a);
+void dxgdevice_remove_alloc_safe(struct dxgdevice *dev,
+ struct dxgallocation *a);
+void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res);
+void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res);
+void dxgdevice_add_paging_queue(struct dxgdevice *dev,
+ struct dxgpagingqueue *pqueue);
+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue);
+void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so);
+void dxgdevice_remove_syncobj(struct dxgsyncobject *so);
+bool dxgdevice_is_active(struct dxgdevice *dev);
+void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev);
+void dxgdevice_release_context_list_lock(struct dxgdevice *dev);
+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *dev);
+void dxgdevice_release_alloc_list_lock(struct dxgdevice *dev);
+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *dev);
+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *dev);
+
+/*
+ * This object represents the execution context of a device.
+ */
+struct dxgcontext {
+ enum dxgobjectstate object_state;
+ struct dxgdevice *device;
+ struct dxgprocess *process;
+ /* entry in the device context list */
+ struct list_head context_list_entry;
+ struct list_head hwqueue_list_head;
+ struct rw_semaphore hwqueue_list_lock;
+ struct kref context_kref;
+ struct d3dkmthandle handle;
+ struct d3dkmthandle device_handle;
+};
+
+struct dxgcontext *dxgcontext_create(struct dxgdevice *dev);
+void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx);
+void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx);
+void dxgcontext_release(struct kref *refcount);
+int dxgcontext_add_hwqueue(struct dxgcontext *ctx,
+ struct dxghwqueue *hq);
+void dxgcontext_remove_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq);
+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *ctx,
+ struct dxghwqueue *hq);
+bool dxgcontext_is_active(struct dxgcontext *ctx);
+
+/*
+ * This object represents the execution hardware queue of a device.
+ */
+struct dxghwqueue {
+ /* entry in the context hw queue list */
+ struct list_head hwqueue_list_entry;
+ struct kref hwqueue_kref;
+ struct dxgcontext *context;
+ struct dxgprocess *process;
+ struct d3dkmthandle progress_fence_sync_object;
+ struct d3dkmthandle handle;
+ struct d3dkmthandle device_handle;
+ void *progress_fence_mapped_address;
+};
+
+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *ctx);
+void dxghwqueue_destroy(struct dxgprocess *pr, struct dxghwqueue *hq);
+void dxghwqueue_release(struct kref *refcount);
+
+/*
+ * A shared resource object is created to track the list of dxgresource
+ * objects, which are opened for the same underlying shared resource.
+ * Objects are shared by using a file descriptor handle.
+ * The FD is created by calling dxgk_share_objects and providing a handle to
+ * the dxgsharedresource. The FD points to a dxgresource object, which is
+ * created by calling dxgk_open_resource_nt. The dxgresource object is
+ * referenced by the FD.
+ *
+ * The object is referenced by every dxgresource in its list.
+ */
+struct dxgsharedresource {
+ /* Every dxgresource object in the resource list takes a reference */
+ struct kref sresource_kref;
+ struct dxgadapter *adapter;
+ /* List of dxgresource objects, opened for the shared resource. */
+ /* Protected by dxgadapter::shared_resource_list_lock */
+ struct list_head resource_list_head;
+ /* Entry in the list of dxgsharedresource in dxgadapter */
+ /* Protected by dxgadapter::shared_resource_list_lock */
+ struct list_head shared_resource_list_entry;
+ struct mutex fd_mutex;
+ /* Referenced by file descriptors */
+ int host_shared_handle_nt_reference;
+ /* Corresponding global handle in the host */
+ struct d3dkmthandle host_shared_handle;
+ /*
+ * When the sync object is shared by NT handle, this is the
+ * corresponding handle in the host
+ */
+ struct d3dkmthandle host_shared_handle_nt;
+ /* Values below are computed when the resource is sealed */
+ u32 runtime_private_data_size;
+ u32 alloc_private_data_size;
+ u32 resource_private_data_size;
+ u32 allocation_count;
+ union {
+ struct {
+ /* Cannot add new allocations */
+ u32 sealed:1;
+ u32 reserved:31;
+ };
+ long flags;
+ };
+ u32 *alloc_private_data_sizes;
+ u8 *alloc_private_data;
+ u8 *runtime_private_data;
+ u8 *resource_private_data;
+};
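+
+/*
+ * Typical lifetime (illustrative only, not verbatim driver code):
+ * dxgsharedresource_create() allocates the shared owner, every dxgresource
+ * opened for the underlying resource is linked in with
+ * dxgsharedresource_add_resource(), and the owner is freed through its kref
+ * (dxgsharedresource_destroy) after the last dxgresource is removed with
+ * dxgsharedresource_remove_resource().
+ */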
+
+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter);
+void dxgsharedresource_destroy(struct kref *refcount);
+void dxgsharedresource_add_resource(struct dxgsharedresource *sres,
+ struct dxgresource *res);
+void dxgsharedresource_remove_resource(struct dxgsharedresource *sres,
+ struct dxgresource *res);
+
+struct dxgresource {
+ struct kref resource_kref;
+ enum dxgobjectstate object_state;
+ struct d3dkmthandle handle;
+ struct list_head alloc_list_head;
+ struct list_head resource_list_entry;
+ struct list_head shared_resource_list_entry;
+ struct dxgdevice *device;
+ struct dxgprocess *process;
+ /* Protects adding allocations to resource and resource destruction */
+ struct mutex resource_mutex;
+ u64 private_runtime_handle;
+ union {
+ struct {
+ u32 destroyed:1; /* Must be the first */
+ u32 handle_valid:1;
+ u32 reserved:30;
+ };
+ long flags;
+ };
+ /* Owner of the shared resource */
+ struct dxgsharedresource *shared_owner;
+};
+
+struct dxgresource *dxgresource_create(struct dxgdevice *dev);
+void dxgresource_destroy(struct dxgresource *res);
+void dxgresource_free_handle(struct dxgresource *res);
+void dxgresource_release(struct kref *refcount);
+int dxgresource_add_alloc(struct dxgresource *res,
+ struct dxgallocation *a);
+void dxgresource_remove_alloc(struct dxgresource *res, struct dxgallocation *a);
+void dxgresource_remove_alloc_safe(struct dxgresource *res,
+ struct dxgallocation *a);
+bool dxgresource_is_active(struct dxgresource *res);
+
+struct privdata {
+ u32 data_size;
+ u8 data[1];
+};
+
+struct dxgallocation {
+ /* Entry in the device list or resource list (when resource exists) */
+ struct list_head alloc_list_entry;
+ /* Allocation owner */
+ union {
+ struct dxgdevice *device;
+ struct dxgresource *resource;
+ } owner;
+ struct dxgprocess *process;
+ /* Pointer to private driver data desc. Used for shared resources */
+ struct privdata *priv_drv_data;
+ struct d3dkmthandle alloc_handle;
+	/* Set to 1 when the allocation belongs to a resource. */
+ u32 resource_owner:1;
+ /* Set to 1 when 'cpu_address' is mapped to the IO space. */
+ u32 cpu_address_mapped:1;
+	/* Set to 1 when the allocation is mapped as cached */
+ u32 cached:1;
+ u32 handle_valid:1;
+ /* GPADL address list for existing sysmem allocations */
+ u32 gpadl;
+ /* Number of pages in the 'pages' array */
+ u32 num_pages;
+ /*
+	 * How many times dxgk_lock2 was called for the allocation, which is mapped
+ * to IO space.
+ */
+ u32 cpu_address_refcount;
+ /*
+ * CPU address from the existing sysmem allocation, or
+ * mapped to the CPU visible backing store in the IO space
+ */
+ void *cpu_address;
+ /* Describes pages for the existing sysmem allocation */
+ struct page **pages;
+};
+
+struct dxgallocation *dxgallocation_create(struct dxgprocess *process);
+void dxgallocation_stop(struct dxgallocation *a);
+void dxgallocation_destroy(struct dxgallocation *a);
+void dxgallocation_free_handle(struct dxgallocation *a);
+
+void init_ioctls(void);
+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2);
+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2);
+
+int dxg_unmap_iospace(void *va, u32 size);
+static inline void guid_to_luid(guid_t *guid, struct winluid *luid)
+{
+ *luid = *(struct winluid *)&guid->b[0];
+}
+
+/*
+ * VM bus interface
+ *
+ */
+
+/*
+ * The interface version is used to ensure that the host and the guest use the
+ * same VM bus protocol. It needs to be incremented every time the VM bus
+ * interface changes. DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION is
+ * incremented each time the earlier versions of the interface are no longer
+ * compatible with the current version.
+ */
+#define DXGK_VMBUS_INTERFACE_VERSION_OLD 27
+#define DXGK_VMBUS_INTERFACE_VERSION 40
+#define DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION 16
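+
+/*
+ * Illustrative use of these constants (not verbatim driver code): the
+ * negotiated dxgglobal->vmbus_ver is compared against them, e.g. the
+ * extended message header is only added when
+ *
+ *	dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION
+ *
+ * and a host reporting a version older than
+ * DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION cannot be supported.
+ */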
+
+void dxgvmb_initialize(void);
+int dxgvmb_send_set_iospace_region(u64 start, u64 len, u32 shared_mem_gpadl);
+int dxgvmb_send_create_process(struct dxgprocess *process);
+int dxgvmb_send_destroy_process(struct d3dkmthandle process);
+int dxgvmb_send_open_adapter(struct dxgadapter *adapter);
+int dxgvmb_send_close_adapter(struct dxgadapter *adapter);
+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter);
+struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmt_createdevice *args);
+int dxgvmb_send_destroy_device(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmthandle h);
+int dxgvmb_send_flush_device(struct dxgdevice *device,
+ enum dxgdevice_flushschedulerreason reason);
+struct d3dkmthandle
+dxgvmb_send_create_context(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmt_createcontextvirtual
+ *args);
+int dxgvmb_send_destroy_context(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmthandle h);
+int dxgvmb_send_create_paging_queue(struct dxgprocess *pr,
+ struct dxgdevice *dev,
+ struct d3dkmt_createpagingqueue *args,
+ struct dxgpagingqueue *pq);
+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle h);
+int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev,
+ struct d3dkmt_createallocation *args,
+ struct d3dkmt_createallocation *__user inargs,
+ struct dxgresource *res,
+ struct dxgallocation **allocs,
+ struct d3dddi_allocationinfo *alloc_info,
+ struct d3dkmt_createstandardallocation *stda);
+int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev,
+ struct d3dkmt_destroyallocation2 *args,
+ struct d3dkmthandle *alloc_handles);
+int dxgvmb_send_make_resident(struct dxgprocess *pr, struct dxgadapter *adapter,
+ struct d3dddi_makeresident *args);
+int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter,
+ struct d3dkmt_evict *args);
+int dxgvmb_send_submit_command(struct dxgprocess *pr,
+ struct dxgadapter *adapter,
+ struct d3dkmt_submitcommand *args);
+int dxgvmb_send_map_gpu_va(struct dxgprocess *pr, struct d3dkmthandle h,
+ struct dxgadapter *adapter,
+ struct d3dddi_mapgpuvirtualaddress *args);
+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *pr,
+ struct dxgadapter *adapter,
+ struct d3dddi_reservegpuvirtualaddress *args);
+int dxgvmb_send_free_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter,
+ struct d3dkmt_freegpuvirtualaddress *args);
+int dxgvmb_send_update_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter,
+ struct d3dkmt_updategpuvirtualaddress *args);
+int dxgvmb_send_create_sync_object(struct dxgprocess *pr,
+ struct dxgadapter *adapter,
+ struct d3dkmt_createsynchronizationobject2
+ *args, struct dxgsyncobject *so);
+int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr,
+ struct d3dkmthandle h);
+int dxgvmb_send_signal_sync_object(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dddicb_signalflags flags,
+ u64 legacy_fence_value,
+ struct d3dkmthandle context,
+ u32 object_count,
+ struct d3dkmthandle *object,
+ u32 context_count,
+ struct d3dkmthandle *contexts,
+ u32 fence_count, u64 *fences,
+ struct eventfd_ctx *cpu_event,
+ struct d3dkmthandle device);
+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle context,
+ u32 object_count,
+ struct d3dkmthandle *objects,
+ u64 *fences,
+ bool legacy_fence);
+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct
+ d3dkmt_waitforsynchronizationobjectfromcpu
+ *args,
+ bool user_address,
+ u64 cpu_event);
+int dxgvmb_send_lock2(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_lock2 *args,
+ struct d3dkmt_lock2 *__user outargs);
+int dxgvmb_send_unlock2(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_unlock2 *args);
+int dxgvmb_send_update_alloc_property(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dddi_updateallocproperty *args,
+ struct d3dddi_updateallocproperty *__user
+ inargs);
+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_markdeviceaserror *args);
+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_setallocationpriority *a);
+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_getallocationpriority *a);
+int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle context,
+ int priority, bool in_process);
+int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle context,
+ int *priority,
+ bool in_process);
+int dxgvmb_send_offer_allocations(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_offerallocations *args);
+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle device,
+ struct d3dkmt_reclaimallocations2 *args,
+ u64 __user *paging_fence_value);
+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle other_process,
+ struct
+ d3dkmt_changevideomemoryreservation
+ *args);
+int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_createhwqueue *args,
+ struct d3dkmt_createhwqueue *__user inargs,
+ struct dxghwqueue *hq);
+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle handle);
+int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryadapterinfo *args);
+int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_submitcommandtohwqueue *a);
+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryclockcalibration *a,
+ struct d3dkmt_queryclockcalibration
+ *__user inargs);
+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_flushheaptransitions *arg);
+int dxgvmb_send_open_sync_object(struct dxgprocess *process,
+ struct dxgvmbuschannel *channel,
+ struct d3dkmthandle h,
+ struct d3dkmthandle *ph);
+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
+ struct dxgvmbuschannel *channel,
+ struct d3dkmt_opensyncobjectfromnthandle2
+ *args,
+ struct dxgsyncobject *syncobj);
+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryallocationresidency
+ *args);
+int dxgvmb_send_escape(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_escape *args);
+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryvideomemoryinfo *args,
+ struct d3dkmt_queryvideomemoryinfo
+ *__user iargs);
+int dxgvmb_send_get_device_state(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_getdevicestate *args,
+ struct d3dkmt_getdevicestate *__user inargs);
+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
+ struct d3dkmthandle object,
+ struct d3dkmthandle *shared_handle);
+int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle);
+int dxgvmb_send_open_resource(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle device,
+ struct d3dkmthandle global_share,
+ u32 allocation_count,
+ u32 total_priv_drv_data_size,
+ struct d3dkmthandle *resource_handle,
+ struct d3dkmthandle *alloc_handles);
+int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device,
+ enum d3dkmdt_standardallocationtype t,
+ struct d3dkmdt_gdisurfacedata *data,
+ u32 physical_adapter_index,
+ u32 *alloc_priv_driver_size,
+ void *prive_alloc_data,
+ u32 *res_priv_data_size,
+ void *priv_res_data);
+int dxgvmb_send_query_statistics(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_querystatistics *args);
+int dxgvmb_send_share_object_with_host(struct dxgprocess *process,
+ struct d3dkmt_shareobjectwithhost *args);
+int dxgvmb_send_present_virtual(struct dxgprocess *process,
+ struct d3dkmt_presentvirtual *args,
+ __u64 acquire_semaphore_nthandle,
+ __u64 release_semaphore_nthandle,
+ __u64 composition_memory_nthandle);
+
+#endif
diff --git a/virtio_dxgkrnl/dxgprocess.c b/virtio_dxgkrnl/dxgprocess.c
new file mode 100644
index 0000000..9ce88e9
--- /dev/null
+++ b/virtio_dxgkrnl/dxgprocess.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * DXGPROCESS implementation
+ *
+ */
+
+#include "dxgkrnl.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+
+/*
+ * Creates a new dxgprocess object and adds it to the global process list.
+ * dxgglobal->plistmutex is acquired internally, so it must not be held by
+ * the caller.
+ */
+struct dxgprocess *dxgprocess_create(void)
+{
+ struct dxgprocess *process;
+ int ret;
+
+ process = vzalloc(sizeof(struct dxgprocess));
+ if (process != NULL) {
+ dev_dbg(dxgglobaldev, "new dxgprocess created\n");
+ process->process = current;
+ process->pid = current->pid;
+ process->tgid = current->tgid;
+ mutex_init(&process->process_mutex);
+ ret = dxgvmb_send_create_process(process);
+ if (ret < 0) {
+ dev_dbg(dxgglobaldev, "send_create_process failed\n");
+ vfree(process);
+ process = NULL;
+ } else {
+ INIT_LIST_HEAD(&process->plistentry);
+ kref_init(&process->process_kref);
+
+ mutex_lock(&dxgglobal->plistmutex);
+ list_add_tail(&process->plistentry,
+ &dxgglobal->plisthead);
+ mutex_unlock(&dxgglobal->plistmutex);
+
+ hmgrtable_init(&process->handle_table, process);
+ hmgrtable_init(&process->local_handle_table, process);
+ INIT_LIST_HEAD(&process->process_adapter_list_head);
+ }
+ }
+ return process;
+}
+
+void dxgprocess_destroy(struct dxgprocess *process)
+{
+ int i;
+ enum hmgrentry_type t;
+ struct d3dkmthandle h;
+ void *o;
+ struct dxgsyncobject *syncobj;
+ struct dxgprocess_adapter *entry;
+ struct dxgprocess_adapter *tmp;
+
+ /* Destroy all adapter state */
+ dxgglobal_acquire_process_adapter_lock();
+ list_for_each_entry_safe(entry, tmp,
+ &process->process_adapter_list_head,
+ process_adapter_list_entry) {
+ dxgprocess_adapter_destroy(entry);
+ }
+ dxgglobal_release_process_adapter_lock();
+
+ i = 0;
+ while (hmgrtable_next_entry(&process->local_handle_table,
+ &i, &t, &h, &o)) {
+ switch (t) {
+ case HMGRENTRY_TYPE_DXGADAPTER:
+ dxgprocess_close_adapter(process, h);
+ break;
+ default:
+ pr_err("invalid entry in local handle table %d", t);
+ break;
+ }
+ }
+
+ i = 0;
+ while (hmgrtable_next_entry(&process->handle_table, &i, &t, &h, &o)) {
+ switch (t) {
+ case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+ dev_dbg(dxgglobaldev, "Destroy syncobj: %p %d", o, i);
+ syncobj = o;
+ syncobj->handle.v = 0;
+ dxgsyncobject_destroy(process, syncobj);
+ break;
+ default:
+ pr_err("invalid entry in handle table %d", t);
+ break;
+ }
+ }
+
+ hmgrtable_destroy(&process->handle_table);
+ hmgrtable_destroy(&process->local_handle_table);
+
+ for (i = 0; i < 2; i++) {
+ if (process->test_handle_table[i]) {
+ hmgrtable_destroy(process->test_handle_table[i]);
+ vfree(process->test_handle_table[i]);
+ process->test_handle_table[i] = NULL;
+ }
+ }
+}
+
+void dxgprocess_release(struct kref *refcount)
+{
+ struct dxgprocess *process;
+
+ process = container_of(refcount, struct dxgprocess, process_kref);
+
+ mutex_lock(&dxgglobal->plistmutex);
+ list_del(&process->plistentry);
+ process->plistentry.next = NULL;
+ mutex_unlock(&dxgglobal->plistmutex);
+
+ dxgprocess_destroy(process);
+
+ if (process->host_handle.v)
+ dxgvmb_send_destroy_process(process->host_handle);
+ vfree(process);
+}
+
+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess
+ *process,
+ struct dxgadapter
+ *adapter)
+{
+ struct dxgprocess_adapter *entry;
+
+ list_for_each_entry(entry, &process->process_adapter_list_head,
+ process_adapter_list_entry) {
+ if (adapter == entry->adapter) {
+ dev_dbg(dxgglobaldev, "Found process info %p", entry);
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Dxgprocess takes references on dxgadapter and dxgprocess_adapter.
+ */
+int dxgprocess_open_adapter(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle *h)
+{
+ int ret = 0;
+ struct dxgprocess_adapter *adapter_info;
+ struct d3dkmthandle handle;
+
+ h->v = 0;
+ adapter_info = dxgprocess_get_adapter_info(process, adapter);
+ if (adapter_info == NULL) {
+ dev_dbg(dxgglobaldev, "creating new process adapter info\n");
+ adapter_info = dxgprocess_adapter_create(process, adapter);
+ if (adapter_info == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ } else {
+ adapter_info->refcount++;
+ }
+
+ handle = hmgrtable_alloc_handle_safe(&process->local_handle_table,
+ adapter, HMGRENTRY_TYPE_DXGADAPTER,
+ true);
+ if (handle.v) {
+ *h = handle;
+ } else {
+ pr_err("failed to create adapter handle\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ if (adapter_info) {
+ dxgglobal_acquire_process_adapter_lock();
+ dxgprocess_adapter_release(adapter_info);
+ dxgglobal_release_process_adapter_lock();
+ }
+ }
+
+ return ret;
+}
+
+int dxgprocess_close_adapter(struct dxgprocess *process,
+ struct d3dkmthandle handle)
+{
+ struct dxgadapter *adapter;
+ struct dxgprocess_adapter *adapter_info;
+ int ret = 0;
+
+ if (handle.v == 0)
+ return 0;
+
+ hmgrtable_lock(&process->local_handle_table, DXGLOCK_EXCL);
+ adapter = dxgprocess_get_adapter(process, handle);
+ if (adapter)
+ hmgrtable_free_handle(&process->local_handle_table,
+ HMGRENTRY_TYPE_DXGADAPTER, handle);
+ hmgrtable_unlock(&process->local_handle_table, DXGLOCK_EXCL);
+
+ if (adapter) {
+ adapter_info = dxgprocess_get_adapter_info(process, adapter);
+ if (adapter_info) {
+ dxgglobal_acquire_process_adapter_lock();
+ dxgprocess_adapter_release(adapter_info);
+ dxgglobal_release_process_adapter_lock();
+ } else {
+ ret = -EINVAL;
+ }
+ } else {
+ pr_err("%s failed %x", __func__, handle.v);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process,
+ struct d3dkmthandle handle)
+{
+ struct dxgadapter *adapter;
+
+ adapter = hmgrtable_get_object_by_type(&process->local_handle_table,
+ HMGRENTRY_TYPE_DXGADAPTER,
+ handle, true);
+ if (adapter == NULL)
+ pr_err("%s failed %x\n", __func__, handle.v);
+ return adapter;
+}
+
+/*
+ * Gets the adapter object from the process handle table.
+ * The adapter object is referenced.
+ * The function acquires the handle table lock in shared mode.
+ */
+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process,
+ struct d3dkmthandle handle,
+ bool error_on_missing)
+{
+ struct dxgadapter *adapter;
+
+ hmgrtable_lock(&process->local_handle_table, DXGLOCK_SHARED);
+ adapter = hmgrtable_get_object_by_type(&process->local_handle_table,
+ HMGRENTRY_TYPE_DXGADAPTER,
+ handle, error_on_missing);
+ if (adapter == NULL) {
+ if (error_on_missing)
+ pr_err("adapter_by_handle failed %x\n", handle.v);
+ } else if (kref_get_unless_zero(&adapter->adapter_kref) == 0) {
+ pr_err("failed to acquire adapter reference\n");
+ adapter = NULL;
+ }
+ hmgrtable_unlock(&process->local_handle_table, DXGLOCK_SHARED);
+ return adapter;
+}
+
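+/*
+ * Returns the dxgdevice that owns the handle of the given type (device,
+ * context, paging queue or hardware queue). For non-device types the cached
+ * device_handle stored in the object is resolved to a dxgdevice. The
+ * returned device is referenced (device_kref) and the caller must drop the
+ * reference. The process handle table lock is taken in shared mode
+ * internally.
+ */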
+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process,
+ enum hmgrentry_type t,
+ struct d3dkmthandle handle)
+{
+ struct dxgdevice *device = NULL;
+ void *obj;
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+ obj = hmgrtable_get_object_by_type(&process->handle_table, t, handle, true);
+ if (obj) {
+ struct d3dkmthandle device_handle = {};
+
+ switch (t) {
+ case HMGRENTRY_TYPE_DXGDEVICE:
+ device = obj;
+ break;
+ case HMGRENTRY_TYPE_DXGCONTEXT:
+ device_handle =
+ ((struct dxgcontext *)obj)->device_handle;
+ break;
+ case HMGRENTRY_TYPE_DXGPAGINGQUEUE:
+ device_handle =
+ ((struct dxgpagingqueue *)obj)->device_handle;
+ break;
+ case HMGRENTRY_TYPE_DXGHWQUEUE:
+ device_handle =
+ ((struct dxghwqueue *)obj)->device_handle;
+ break;
+ default:
+ pr_err("invalid handle type: %d\n", t);
+ break;
+ }
+ if (device == NULL)
+ device = hmgrtable_get_object_by_type(
+ &process->handle_table,
+ HMGRENTRY_TYPE_DXGDEVICE,
+ device_handle, true);
+ if (device)
+ if (kref_get_unless_zero(&device->device_kref) == 0)
+ device = NULL;
+ }
+ if (device == NULL)
+ pr_err("device_by_handle failed: %d %x\n", t, handle.v);
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+ return device;
+}
+
+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process,
+ struct d3dkmthandle handle)
+{
+ return dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGDEVICE,
+ handle);
+}
+
+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process)
+{
+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+}
+
+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process)
+{
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+}
+
+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process)
+{
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+}
+
+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process)
+{
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+}
diff --git a/virtio_dxgkrnl/dxgsyncfile.c b/virtio_dxgkrnl/dxgsyncfile.c
new file mode 100644
index 0000000..eefda1f
--- /dev/null
+++ b/virtio_dxgkrnl/dxgsyncfile.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Ioctl implementation
+ *
+ */
+
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/mman.h>
+
+#include "dxgkrnl.h"
+#include "dxgvmbus.h"
+#include "dxgsyncfile.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+#undef dev_fmt
+#define dev_fmt(fmt) "dxgk: " fmt
+
+static const struct dma_fence_ops dxgdmafence_ops;
+
+static struct dxgsyncpoint *to_syncpoint(struct dma_fence *fence)
+{
+ if (fence->ops != &dxgdmafence_ops)
+ return NULL;
+ return container_of(fence, struct dxgsyncpoint, base);
+}
+
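+/*
+ * Creates a sync_file file descriptor backed by a dxgkrnl dma-fence.
+ * A host event is registered for the fence and a "wait for sync object
+ * from CPU" command is sent to the host, so the fence can be signaled once
+ * the host reports that the monitored fence reached args.fence_value.
+ * The new fd is returned to user mode in args.sync_file_handle.
+ */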
+int dxgk_create_sync_file(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createsyncfile args;
+ struct dxgsyncpoint *pt = NULL;
+ int ret = 0;
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ struct sync_file *sync_file = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmt_waitforsynchronizationobjectfromcpu waitargs = {};
+
+ if (fd < 0) {
+ pr_err("get_unused_fd_flags failed: %d", fd);
+ ret = fd;
+ goto cleanup;
+ }
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ pr_err("dxgprocess_device_by_handle failed");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0) {
+ pr_err("dxgdevice_acquire_lock_shared failed");
+ device = NULL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ pr_err("dxgadapter_acquire_lock_shared failed");
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ spin_lock_init(&pt->lock);
+ pt->fence_value = args.fence_value;
+ pt->context = dma_fence_context_alloc(1);
+ pt->hdr.event_id = dxgglobal_new_host_event_id();
+ pt->hdr.event_type = dxghostevent_dma_fence;
+ dxgglobal_add_host_event(&pt->hdr);
+
+ dma_fence_init(&pt->base, &dxgdmafence_ops, &pt->lock,
+ pt->context, args.fence_value);
+
+ sync_file = sync_file_create(&pt->base);
+ if (sync_file == NULL) {
+ pr_err("sync_file_create failed");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ dma_fence_put(&pt->base);
+
+ waitargs.device = args.device;
+ waitargs.object_count = 1;
+ waitargs.objects = &args.monitored_fence;
+ waitargs.fence_values = &args.fence_value;
+ ret = dxgvmb_send_wait_sync_object_cpu(process, adapter,
+ &waitargs, false,
+ pt->hdr.event_id);
+ if (ret < 0) {
+ pr_err("dxgvmb_send_wait_sync_object_cpu failed");
+ goto cleanup;
+ }
+
+ args.sync_file_handle = (u64)fd;
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy output args", __func__);
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ fd_install(fd, sync_file->file);
+
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ dxgdevice_release_lock_shared(device);
+ if (ret) {
+ if (sync_file) {
+ fput(sync_file->file);
+ /* sync_file_release will destroy dma_fence */
+ pt = NULL;
+ }
+ if (pt)
+ dma_fence_put(&pt->base);
+ if (fd >= 0)
+ put_unused_fd(fd);
+ }
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static const char *dxgdmafence_get_driver_name(struct dma_fence *fence)
+{
+ return "dxgkrnl";
+}
+
+static const char *dxgdmafence_get_timeline_name(struct dma_fence *fence)
+{
+ return "no_timeline";
+}
+
+static void dxgdmafence_release(struct dma_fence *fence)
+{
+ struct dxgsyncpoint *syncpoint;
+
+ syncpoint = to_syncpoint(fence);
+ if (syncpoint) {
+ if (syncpoint->hdr.event_id)
+ dxgglobal_get_host_event(syncpoint->hdr.event_id);
+ kfree(syncpoint);
+ }
+}
+
+static bool dxgdmafence_signaled(struct dma_fence *fence)
+{
+ struct dxgsyncpoint *syncpoint;
+
+ syncpoint = to_syncpoint(fence);
+	if (syncpoint == NULL)
+ return true;
+ return __dma_fence_is_later(syncpoint->fence_value, fence->seqno,
+ fence->ops);
+}
+
+static bool dxgdmafence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static void dxgdmafence_value_str(struct dma_fence *fence,
+ char *str, int size)
+{
+ snprintf(str, size, "%lld", fence->seqno);
+}
+
+static void dxgdmafence_timeline_value_str(struct dma_fence *fence,
+ char *str, int size)
+{
+ struct dxgsyncpoint *syncpoint;
+
+ syncpoint = to_syncpoint(fence);
+ snprintf(str, size, "%lld", syncpoint->fence_value);
+}
+
+static const struct dma_fence_ops dxgdmafence_ops = {
+ .get_driver_name = dxgdmafence_get_driver_name,
+ .get_timeline_name = dxgdmafence_get_timeline_name,
+ .enable_signaling = dxgdmafence_enable_signaling,
+ .signaled = dxgdmafence_signaled,
+ .release = dxgdmafence_release,
+ .fence_value_str = dxgdmafence_value_str,
+ .timeline_value_str = dxgdmafence_timeline_value_str,
+};
diff --git a/virtio_dxgkrnl/dxgsyncfile.h b/virtio_dxgkrnl/dxgsyncfile.h
new file mode 100644
index 0000000..a91fb2e
--- /dev/null
+++ b/virtio_dxgkrnl/dxgsyncfile.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Headers for sync file objects
+ *
+ */
+
+#ifndef _DXGSYNCFILE_H
+#define _DXGSYNCFILE_H
+
+#include <linux/sync_file.h>
+
+int dxgk_create_sync_file(struct dxgprocess *process, void *__user inargs);
+
+struct dxgsyncpoint {
+ struct dxghostevent hdr;
+ struct dma_fence base;
+ u64 fence_value;
+ u64 context;
+ spinlock_t lock;
+ u64 u64;
+};
+
+#endif /* _DXGSYNCFILE_H */
diff --git a/virtio_dxgkrnl/dxgvmbus.c b/virtio_dxgkrnl/dxgvmbus.c
new file mode 100644
index 0000000..a9c5603
--- /dev/null
+++ b/virtio_dxgkrnl/dxgvmbus.c
@@ -0,0 +1,3502 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * VM bus interface implementation
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/eventfd.h>
+#include <linux/hyperv.h>
+#include <linux/mman.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include "dxgkrnl.h"
+#include "dxgvmbus.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+
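+/*
+ * Prepares a VM bus message. Small messages are built on the stack
+ * (msg_on_stack), larger ones are allocated with kzalloc. When the host
+ * interface version supports it, an extended header carrying the vGPU LUID
+ * is prepended and msg->msg points past it. The message is sent on the
+ * adapter channel when an adapter is given and async messages are disabled;
+ * otherwise the global channel is used.
+ */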
+static int init_message(struct dxgvmbusmsg *msg, struct dxgadapter *adapter,
+ struct dxgprocess *process, u32 size)
+{
+ bool use_ext_header = dxgglobal->vmbus_ver >=
+ DXGK_VMBUS_INTERFACE_VERSION;
+
+ if (use_ext_header)
+ size += sizeof(struct dxgvmb_ext_header);
+ msg->size = size;
+ if (size <= VMBUSMESSAGEONSTACK) {
+ msg->hdr = (void *)msg->msg_on_stack;
+ memset(msg->hdr, 0, size);
+ } else {
+ msg->hdr = kzalloc(size, GFP_ATOMIC);
+ if (msg->hdr == NULL)
+ return -ENOMEM;
+ }
+ if (use_ext_header) {
+ msg->msg = (char *)&msg->hdr[1];
+ msg->hdr->command_offset = sizeof(msg->hdr[0]);
+ if (adapter)
+ msg->hdr->vgpu_luid = adapter->host_vgpu_luid;
+ } else {
+ msg->msg = (char *)msg->hdr;
+ }
+ if (adapter && !dxgglobal->async_msg_enabled)
+ msg->channel = &adapter->channel;
+ else
+ msg->channel = &dxgglobal->channel;
+ return 0;
+}
+
+static int init_message_res(struct dxgvmbusmsgres *msg,
+ struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ u32 size,
+ u32 result_size)
+{
+ bool use_ext_header = dxgglobal->vmbus_ver >=
+ DXGK_VMBUS_INTERFACE_VERSION;
+
+ if (use_ext_header)
+ size += sizeof(struct dxgvmb_ext_header);
+ msg->size = size;
+ msg->res_size += (result_size + 7) & ~7;
+ size += msg->res_size;
+ msg->hdr = kzalloc(size, GFP_ATOMIC);
+ if (msg->hdr == NULL) {
+ pr_err("Failed to allocate VM bus message: %d", size);
+ return -ENOMEM;
+ }
+ if (use_ext_header) {
+ msg->msg = (char *)&msg->hdr[1];
+ msg->hdr->command_offset = sizeof(msg->hdr[0]);
+ msg->hdr->vgpu_luid = adapter->host_vgpu_luid;
+ } else {
+ msg->msg = (char *)msg->hdr;
+ }
+ msg->res = (char *)msg->hdr + msg->size;
+ if (dxgglobal->async_msg_enabled)
+ msg->channel = &dxgglobal->channel;
+ else
+ msg->channel = &adapter->channel;
+ return 0;
+}
+
+static void free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process)
+{
+ if (msg->hdr && (char *)msg->hdr != msg->msg_on_stack)
+ kfree(msg->hdr);
+}
+
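+/*
+ * Converts an NTSTATUS returned by the host to a Linux error code.
+ * Success codes are returned as-is (non-negative), known failure codes are
+ * mapped to negative errno values and anything else becomes -EINVAL.
+ */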
+int ntstatus2int(struct ntstatus status)
+{
+ if (NT_SUCCESS(status))
+ return (int)status.v;
+ switch (status.v) {
+ case STATUS_OBJECT_NAME_COLLISION:
+ return -EEXIST;
+ case STATUS_NO_MEMORY:
+ return -ENOMEM;
+ case STATUS_INVALID_PARAMETER:
+ return -EINVAL;
+ case STATUS_OBJECT_NAME_INVALID:
+ case STATUS_OBJECT_NAME_NOT_FOUND:
+ return -ENOENT;
+ case STATUS_TIMEOUT:
+ return -EAGAIN;
+ case STATUS_BUFFER_TOO_SMALL:
+ return -EOVERFLOW;
+ case STATUS_DEVICE_REMOVED:
+ return -ENODEV;
+ case STATUS_ACCESS_DENIED:
+ return -EACCES;
+ case STATUS_NOT_SUPPORTED:
+ return -EPERM;
+ case STATUS_ILLEGAL_INSTRUCTION:
+ return -EOPNOTSUPP;
+ case STATUS_INVALID_HANDLE:
+ return -EBADF;
+ case STATUS_GRAPHICS_ALLOCATION_BUSY:
+ return -EINPROGRESS;
+ case STATUS_OBJECT_TYPE_MISMATCH:
+ return -EPROTOTYPE;
+ case STATUS_NOT_IMPLEMENTED:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Helper functions
+ */
+
+static void command_vm_to_host_init2(struct dxgkvmb_command_vm_to_host *command,
+ enum dxgkvmb_commandtype_global t,
+ struct d3dkmthandle process)
+{
+ command->command_type = t;
+ command->process = process;
+ command->command_id = 0;
+ command->channel_type = DXGKVMB_VM_TO_HOST;
+}
+
+static void command_vgpu_to_host_init1(struct dxgkvmb_command_vgpu_to_host
+ *command,
+ enum dxgkvmb_commandtype type)
+{
+ command->command_type = type;
+ command->process.v = 0;
+ command->command_id = 0;
+ command->channel_type = DXGKVMB_VGPU_TO_HOST;
+}
+
+static void command_vgpu_to_host_init2(struct dxgkvmb_command_vgpu_to_host
+ *command,
+ enum dxgkvmb_commandtype type,
+ struct d3dkmthandle process)
+{
+ command->command_type = type;
+ command->process = process;
+ command->command_id = 0;
+ command->channel_type = DXGKVMB_VGPU_TO_HOST;
+}
+
+static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command,
+ enum dxgkvmb_commandtype_global type)
+{
+ command->command_type = type;
+ command->process.v = 0;
+ command->command_id = 0;
+ command->channel_type = DXGKVMB_VM_TO_HOST;
+}
+
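+/*
+ * Sends a synchronous VM bus command whose only result is an NTSTATUS and
+ * converts it to a Linux error code with ntstatus2int().
+ */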
+static int
+dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel,
+ void *command, u32 cmd_size)
+{
+ struct ntstatus *status;
+ int ret;
+
+	status = kzalloc(sizeof(struct ntstatus), GFP_ATOMIC);
+	if (status == NULL)
+		return -ENOMEM;
+
+ ret = dxgvmb_send_sync_msg(channel, command, cmd_size,
+ status, sizeof(struct ntstatus));
+ if (ret >= 0)
+ ret = ntstatus2int(*status);
+
+ kfree(status);
+ return ret;
+}
+
+static int check_iospace_address(unsigned long address, u32 size)
+{
+ if (address < dxgglobal->mmiospace_base ||
+ size > dxgglobal->mmiospace_size ||
+ address >= (dxgglobal->mmiospace_base +
+ dxgglobal->mmiospace_size - size)) {
+ pr_err("invalid iospace address %lx", address);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int dxg_unmap_iospace(void *va, u32 size)
+{
+ unsigned long page_addr;
+ int ret = 0;
+
+ page_addr = ((unsigned long)va) & PAGE_MASK;
+
+	dev_dbg(dxgglobaldev, "%s %p %lx %x", __func__, va, page_addr, size);
+
+ /*
+ * When an app calls exit(), dxgkrnl is called to close the device
+ * with current->mm equal to NULL.
+ */
+ if (current->mm) {
+ ret = vm_munmap(page_addr, size);
+ if (ret) {
+ pr_err("vm_munmap failed %d", ret);
+ return -ENOTRECOVERABLE;
+ }
+ }
+ return 0;
+}
+
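+/*
+ * Maps a host IO space region into the user mode address space of the
+ * current process. An anonymous mapping is created with vm_mmap() and then
+ * backed by the IO space pages with io_remap_pfn_range(); the mapping is
+ * write-combined unless 'cached' is set. Returns the CPU virtual address
+ * or NULL on failure.
+ */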
+static u8 *dxg_map_iospace(u64 iospace_address, u32 size,
+ unsigned long protection, bool cached)
+{
+ struct vm_area_struct *vma;
+ unsigned long va;
+ int ret = 0;
+
+ dev_dbg(dxgglobaldev, "%s: %llx %x %lx",
+ __func__, iospace_address, size, protection);
+ if (check_iospace_address(iospace_address, size) < 0) {
+ pr_err("%s: invalid address", __func__);
+ return NULL;
+ }
+
+ va = vm_mmap(NULL, 0, size, protection, MAP_SHARED | MAP_ANONYMOUS, 0);
+ if ((long)va <= 0) {
+ pr_err("vm_mmap failed %lx %d", va, size);
+ return NULL;
+ }
+
+ mmap_read_lock(current->mm);
+ vma = find_vma(current->mm, (unsigned long)va);
+ if (vma) {
+ pgprot_t prot = vma->vm_page_prot;
+
+ if (!cached)
+ prot = pgprot_writecombine(prot);
+ dev_dbg(dxgglobaldev, "vma: %lx %lx %lx",
+ vma->vm_start, vma->vm_end, va);
+ vma->vm_pgoff = iospace_address >> PAGE_SHIFT;
+ ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ size, prot);
+ if (ret)
+ pr_err("io_remap_pfn_range failed: %d", ret);
+ } else {
+ pr_err("failed to find vma: %p %lx", vma, va);
+ ret = -ENOMEM;
+ }
+ mmap_read_unlock(current->mm);
+
+ if (ret) {
+ dxg_unmap_iospace((void *)va, size);
+ return NULL;
+ }
+ dev_dbg(dxgglobaldev, "%s end: %lx", __func__, va);
+ return (u8 *) (va + iospace_address % PAGE_SIZE);
+}
+
+/*
+ * Global messages to the host
+ */
+
+int dxgvmb_send_set_iospace_region(u64 start, u64 len, u32 shared_mem_gpadl)
+{
+ int ret;
+ struct dxgkvmb_command_setiospaceregion *command;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, NULL, NULL, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ command_vm_to_host_init1(&command->hdr,
+ DXGK_VMBCOMMAND_SETIOSPACEREGION);
+ command->start = start;
+ command->length = len;
+ command->shared_page_gpadl = shared_mem_gpadl;
+ ret = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel, msg.hdr,
+ msg.size);
+ if (ret < 0)
+ pr_err("send_set_iospace_region failed %x", ret);
+
+ dxgglobal_release_channel_lock();
+cleanup:
+ free_message(&msg, NULL);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_create_process(struct dxgprocess *process)
+{
+ int ret;
+ struct dxgkvmb_command_createprocess *command;
+ struct dxgkvmb_command_createprocess_return result = { 0 };
+ struct dxgvmbusmsg msg;
+ char s[WIN_MAX_PATH];
+ int i;
+
+ ret = init_message(&msg, NULL, process, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ command_vm_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_CREATEPROCESS);
+ command->process = process;
+ command->process_id = process->process->pid;
+ command->linux_process = 1;
+ s[0] = 0;
+ __get_task_comm(s, WIN_MAX_PATH, process->process);
+ for (i = 0; i < WIN_MAX_PATH; i++) {
+ command->process_name[i] = s[i];
+ if (s[i] == 0)
+ break;
+ }
+
+ ret = dxgvmb_send_sync_msg(&dxgglobal->channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0) {
+ pr_err("create_process failed %d", ret);
+ } else if (result.hprocess.v == 0) {
+ pr_err("create_process returned 0 handle");
+ ret = -ENOTRECOVERABLE;
+ } else {
+ process->host_handle = result.hprocess;
+ dev_dbg(dxgglobaldev, "create_process returned %x",
+ process->host_handle.v);
+ }
+
+ dxgglobal_release_channel_lock();
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_destroy_process(struct d3dkmthandle process)
+{
+ int ret;
+ struct dxgkvmb_command_destroyprocess *command;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, NULL, NULL, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_DESTROYPROCESS,
+ process);
+ ret = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel,
+ msg.hdr, msg.size);
+ dxgglobal_release_channel_lock();
+
+cleanup:
+ free_message(&msg, NULL);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
+ struct dxgvmbuschannel *channel,
+ struct d3dkmt_opensyncobjectfromnthandle2
+ *args,
+ struct dxgsyncobject *syncobj)
+{
+ struct dxgkvmb_command_opensyncobject *command;
+ struct dxgkvmb_command_opensyncobject_return result = { };
+ int ret;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, NULL, process, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT,
+ process->host_handle);
+ command->device = args->device;
+ command->global_sync_object = syncobj->shared_owner->host_shared_handle_nt;
+ command->flags = args->flags;
+ if (syncobj->monitored_fence)
+ command->engine_affinity =
+ args->monitored_fence.engine_affinity;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_sync_msg(channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+
+ dxgglobal_release_channel_lock();
+
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result.status);
+ if (ret < 0)
+ goto cleanup;
+
+ args->sync_object = result.sync_object;
+ if (syncobj->monitored_fence) {
+ void *va = dxg_map_iospace(result.guest_cpu_physical_address,
+ PAGE_SIZE, PROT_READ | PROT_WRITE,
+ true);
+ if (va == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ args->monitored_fence.fence_value_cpu_va = va;
+ args->monitored_fence.fence_value_gpu_va =
+ result.gpu_virtual_address;
+ syncobj->mapped_address = va;
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
+ struct d3dkmthandle object,
+ struct d3dkmthandle *shared_handle)
+{
+ struct dxgkvmb_command_createntsharedobject *command;
+ int ret;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, NULL, process, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vm_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT,
+ process->host_handle);
+ command->object = object;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(),
+ msg.hdr, msg.size, shared_handle,
+ sizeof(*shared_handle));
+
+ dxgglobal_release_channel_lock();
+
+ if (ret < 0)
+ goto cleanup;
+ if (shared_handle->v == 0) {
+ pr_err("failed to create NT shared object");
+ ret = -ENOTRECOVERABLE;
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle)
+{
+ struct dxgkvmb_command_destroyntsharedobject *command;
+ int ret;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, NULL, NULL, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vm_to_host_init1(&command->hdr,
+ DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT);
+ command->shared_handle = shared_handle;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(),
+ msg.hdr, msg.size);
+
+ dxgglobal_release_channel_lock();
+
+cleanup:
+ free_message(&msg, NULL);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_destroy_sync_object(struct dxgprocess *process,
+ struct d3dkmthandle sync_object)
+{
+ struct dxgkvmb_command_destroysyncobject *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, NULL, process, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ command_vm_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_DESTROYSYNCOBJECT,
+ process->host_handle);
+ command->sync_object = sync_object;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(),
+ msg.hdr, msg.size);
+
+ dxgglobal_release_channel_lock();
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_share_object_with_host(struct dxgprocess *process,
+ struct d3dkmt_shareobjectwithhost *args)
+{
+ struct dxgkvmb_command_shareobjectwithhost *command;
+ struct dxgkvmb_command_shareobjectwithhost_return result = {};
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, NULL, process, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ command_vm_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST,
+ process->host_handle);
+ command->device_handle = args->device_handle;
+ command->object_handle = args->object_handle;
+
+ ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(),
+ msg.hdr, msg.size, &result, sizeof(result));
+
+ dxgglobal_release_channel_lock();
+
+ if (ret || !NT_SUCCESS(result.status)) {
+ if (ret == 0)
+ ret = ntstatus2int(result.status);
+ pr_err("DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST failed: %d %x",
+ ret, result.status.v);
+ goto cleanup;
+ }
+ args->object_vail_nt_handle = result.vail_nt_handle;
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_present_virtual(struct dxgprocess *process,
+ struct d3dkmt_presentvirtual *args,
+ __u64 acquire_semaphore_nthandle,
+ __u64 release_semaphore_nthandle,
+ __u64 composition_memory_nthandle)
+{
+ struct dxgkvmb_command_presentvirtual *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+ u32 cmd_size = 0;
+
+	cmd_size = sizeof(struct dxgkvmb_command_presentvirtual) +
+		   args->private_data_size;
+
+ ret = init_message(&msg, NULL, process, cmd_size);
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vm_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_PRESENTVIRTUAL,
+ process->host_handle);
+ command->acquire_semaphore_nthandle = acquire_semaphore_nthandle;
+ command->release_semaphore_nthandle = release_semaphore_nthandle;
+ command->composition_memory_nthandle = composition_memory_nthandle;
+ command->private_data_size = args->private_data_size;
+
+ if (args->private_data_size) {
+ ret = copy_from_user(&command[1],
+ args->private_data,
+ args->private_data_size);
+ if (ret) {
+ pr_err("%s failed to copy user data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(), msg.hdr,
+ msg.size);
+
+ dxgglobal_release_channel_lock();
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+/*
+ * Virtual GPU messages to the host
+ */
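+/*
+ * These helpers pass the adapter to init_message(), so the command is sent
+ * on the adapter's channel (msg.channel) and the header is addressed to the
+ * host-side virtual GPU with command_vgpu_to_host_init1()/init2().
+ */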
+
+int dxgvmb_send_open_adapter(struct dxgadapter *adapter)
+{
+ int ret;
+ struct dxgkvmb_command_openadapter *command;
+ struct dxgkvmb_command_openadapter_return result = { };
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, adapter, NULL, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_OPENADAPTER);
+ command->vmbus_interface_version = dxgglobal->vmbus_ver;
+ command->vmbus_last_compatible_interface_version =
+ DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result.status);
+ adapter->host_handle = result.host_adapter_handle;
+
+cleanup:
+ free_message(&msg, NULL);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_close_adapter(struct dxgadapter *adapter)
+{
+ int ret;
+ struct dxgkvmb_command_closeadapter *command;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, adapter, NULL, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_CLOSEADAPTER);
+ command->host_handle = adapter->host_handle;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+ free_message(&msg, NULL);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter)
+{
+ int ret;
+ struct dxgkvmb_command_getinternaladapterinfo *command;
+ struct dxgkvmb_command_getinternaladapterinfo_return result = { };
+ struct dxgvmbusmsg msg;
+ u32 result_size = sizeof(result);
+
+ ret = init_message(&msg, adapter, NULL, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init1(&command->hdr,
+ DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO);
+ if (dxgglobal->vmbus_ver < DXGK_VMBUS_INTERFACE_VERSION)
+ result_size -= sizeof(struct winluid);
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, result_size);
+ if (ret >= 0) {
+ adapter->host_adapter_luid = result.host_adapter_luid;
+ adapter->host_vgpu_luid = result.host_vgpu_luid;
+ wcsncpy(adapter->device_description, result.device_description,
+ sizeof(adapter->device_description) / sizeof(u16));
+ wcsncpy(adapter->device_instance_id, result.device_instance_id,
+ sizeof(adapter->device_instance_id) / sizeof(u16));
+ dxgglobal->async_msg_enabled = result.async_msg_enabled != 0;
+ }
+ free_message(&msg, NULL);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmt_createdevice *args)
+{
+ int ret;
+ struct dxgkvmb_command_createdevice *command;
+ struct dxgkvmb_command_createdevice_return result = { };
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_CREATEDEVICE,
+ process->host_handle);
+ command->flags = args->flags;
+ command->error_code = &dxgglobal->device_state_counter;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0)
+ result.device.v = 0;
+ free_message(&msg, process);
+cleanup:
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return result.device;
+}
+
+int dxgvmb_send_destroy_device(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmthandle h)
+{
+ int ret;
+ struct dxgkvmb_command_destroydevice *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_DESTROYDEVICE,
+ process->host_handle);
+ command->device = h;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_flush_device(struct dxgdevice *device,
+ enum dxgdevice_flushschedulerreason reason)
+{
+ int ret;
+ struct dxgkvmb_command_flushdevice *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+ struct dxgprocess *process = device->process;
+
+ ret = init_message(&msg, device->adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_FLUSHDEVICE,
+ process->host_handle);
+ command->device = device->handle;
+ command->reason = reason;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+struct d3dkmthandle
+dxgvmb_send_create_context(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmt_createcontextvirtual *args)
+{
+ struct dxgkvmb_command_createcontextvirtual *command = NULL;
+ u32 cmd_size;
+ int ret;
+ struct d3dkmthandle context = {};
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("PrivateDriverDataSize is invalid");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ cmd_size = sizeof(struct dxgkvmb_command_createcontextvirtual) +
+ args->priv_drv_data_size - 1;
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL,
+ process->host_handle);
+ command->device = args->device;
+ command->node_ordinal = args->node_ordinal;
+ command->engine_affinity = args->engine_affinity;
+ command->flags = args->flags;
+ command->client_hint = args->client_hint;
+ command->priv_drv_data_size = args->priv_drv_data_size;
+ if (args->priv_drv_data_size) {
+ ret = copy_from_user(command->priv_drv_data,
+ args->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+			pr_err("%s failed to copy private data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+	/* The input command buffer is reused to receive the output */
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ command, cmd_size);
+ if (ret < 0) {
+ goto cleanup;
+ } else {
+ context = command->context;
+ if (args->priv_drv_data_size) {
+ ret = copy_to_user(args->priv_drv_data,
+ command->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+				pr_err("%s failed to copy private data to user",
+ __func__);
+ ret = -EINVAL;
+ dxgvmb_send_destroy_context(adapter, process,
+ context);
+ context.v = 0;
+ }
+ }
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return context;
+}
+
+int dxgvmb_send_destroy_context(struct dxgadapter *adapter,
+ struct dxgprocess *process,
+ struct d3dkmthandle h)
+{
+ int ret;
+ struct dxgkvmb_command_destroycontext *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_DESTROYCONTEXT,
+ process->host_handle);
+ command->context = h;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_create_paging_queue(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct d3dkmt_createpagingqueue *args,
+ struct dxgpagingqueue *pqueue)
+{
+ struct dxgkvmb_command_createpagingqueue_return result;
+ struct dxgkvmb_command_createpagingqueue *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, device->adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATEPAGINGQUEUE,
+ process->host_handle);
+ command->args = *args;
+ args->paging_queue.v = 0;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result,
+ sizeof(result));
+ if (ret < 0) {
+ pr_err("send_create_paging_queue failed %x", ret);
+ goto cleanup;
+ }
+
+ args->paging_queue = result.paging_queue;
+ args->sync_object = result.sync_object;
+ args->fence_cpu_virtual_address =
+ dxg_map_iospace(result.fence_storage_physical_address, PAGE_SIZE,
+ PROT_READ | PROT_WRITE, true);
+ if (args->fence_cpu_virtual_address == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ pqueue->mapped_address = args->fence_cpu_virtual_address;
+ pqueue->handle = args->paging_queue;
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle h)
+{
+ int ret;
+ struct dxgkvmb_command_destroypagingqueue *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE,
+ process->host_handle);
+ command->paging_queue = h;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
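+/*
+ * Copy the variable-size payload of a create-allocation command. The data
+ * placed after the command header is laid out as: the per-allocation
+ * allocinfo array, the private runtime data, the global private driver data
+ * (or the standard allocation descriptor), and the per-allocation private
+ * driver data.
+ */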
+static int
+copy_private_data(struct d3dkmt_createallocation *args,
+ struct dxgkvmb_command_createallocation *command,
+ struct d3dddi_allocationinfo *input_alloc_info,
+ struct d3dkmt_createstandardallocation *standard_alloc)
+{
+ struct dxgkvmb_command_createallocation_allocinfo *alloc_info;
+ struct d3dddi_allocationinfo *input_alloc;
+ int ret = 0;
+ int i;
+ u8 *private_data_dest = (u8 *) &command[1] +
+ (args->alloc_count *
+ sizeof(struct dxgkvmb_command_createallocation_allocinfo));
+
+ if (args->private_runtime_data_size) {
+ ret = copy_from_user(private_data_dest,
+ args->private_runtime_data,
+ args->private_runtime_data_size);
+ if (ret) {
+ pr_err("%s failed to copy runtime data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ private_data_dest += args->private_runtime_data_size;
+ }
+
+ if (args->flags.standard_allocation) {
+ dev_dbg(dxgglobaldev, "private data offset %d",
+ (u32) (private_data_dest - (u8 *) command));
+
+ args->priv_drv_data_size = sizeof(*args->standard_allocation);
+ memcpy(private_data_dest, standard_alloc,
+ sizeof(*standard_alloc));
+ private_data_dest += args->priv_drv_data_size;
+ } else if (args->priv_drv_data_size) {
+ ret = copy_from_user(private_data_dest,
+ args->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy private data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ private_data_dest += args->priv_drv_data_size;
+ }
+
+ alloc_info = (void *)&command[1];
+ input_alloc = input_alloc_info;
+ if (input_alloc_info[0].sysmem)
+ command->flags.existing_sysmem = 1;
+ for (i = 0; i < args->alloc_count; i++) {
+ alloc_info->flags = input_alloc->flags.value;
+ alloc_info->vidpn_source_id = input_alloc->vidpn_source_id;
+ alloc_info->priv_drv_data_size =
+ input_alloc->priv_drv_data_size;
+ if (input_alloc->priv_drv_data_size) {
+ ret = copy_from_user(private_data_dest,
+ input_alloc->priv_drv_data,
+ input_alloc->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ private_data_dest += input_alloc->priv_drv_data_size;
+ }
+ alloc_info++;
+ input_alloc++;
+ }
+
+cleanup:
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
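+/*
+ * Attach existing system memory as the backing store of a host allocation.
+ * The user pages are pinned first; then either the pre-created GPADL is sent
+ * to the host or, when map_guest_pages_enabled is set, the PFN list is sent
+ * in chunks bounded by DXG_MAX_VM_BUS_PACKET_SIZE so the host can map the
+ * pages for GPU access.
+ */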
+int create_existing_sysmem(struct dxgdevice *device,
+ struct dxgkvmb_command_allocinfo_return *host_alloc,
+ struct dxgallocation *dxgalloc,
+ bool read_only,
+ const void *sysmem)
+{
+ int ret1 = 0;
+ void *kmem = NULL;
+ int ret = 0;
+ struct dxgkvmb_command_setexistingsysmemstore *set_store_command;
+ struct dxgkvmb_command_setexistingsysmempages *set_pages_command;
+ u64 alloc_size = host_alloc->allocation_size;
+ u32 npages = alloc_size >> PAGE_SHIFT;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+ const u32 max_pfns_in_message =
+ (DXG_MAX_VM_BUS_PACKET_SIZE - sizeof(*set_pages_command) -
+ PAGE_SIZE) / sizeof(__u64);
+ u32 alloc_offset_in_pages = 0;
+ struct page **page_in;
+ u64 *pfn;
+ u32 pages_to_send;
+ u32 i;
+
+ /*
+ * Create a guest physical address list and set it as the allocation
+	 * backing store in the host. This is done after creating the host
+	 * allocation, because the allocation size is known only at that point.
+ */
+
+ dev_dbg(dxgglobaldev, " Alloc size: %lld", alloc_size);
+
+ dxgalloc->cpu_address = (void *)sysmem;
+
+ dxgalloc->pages = vzalloc(npages * sizeof(void *));
+ if (dxgalloc->pages == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret1 = get_user_pages_fast((unsigned long)sysmem, npages, !read_only,
+ dxgalloc->pages);
+ if (ret1 != npages) {
+ pr_err("get_user_pages_fast failed: %d", ret1);
+ if (ret1 > 0 && ret1 < npages)
+ release_pages(dxgalloc->pages, ret1);
+ vfree(dxgalloc->pages);
+ dxgalloc->pages = NULL;
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ if (!dxgglobal->map_guest_pages_enabled) {
+ ret = init_message(&msg, device->adapter, device->process,
+ sizeof(*set_store_command));
+ if (ret)
+ goto cleanup;
+ set_store_command = (void *)msg.msg;
+
+ kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL);
+ if (kmem == NULL) {
+ pr_err("vmap failed");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ command_vgpu_to_host_init2(&set_store_command->hdr,
+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE,
+ device->process->host_handle);
+ set_store_command->device = device->handle;
+ set_store_command->allocation = host_alloc->allocation;
+ set_store_command->gpadl = dxgalloc->gpadl;
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr,
+ msg.size);
+ if (ret < 0)
+ pr_err("failed to set existing store: %x", ret);
+ } else {
+ /*
+ * Send the list of the allocation PFNs to the host. The host
+ * will map the pages for GPU access.
+ */
+
+ ret = init_message(&msg, device->adapter, device->process,
+ sizeof(*set_pages_command) +
+ max_pfns_in_message * sizeof(u64));
+ if (ret)
+ goto cleanup;
+ set_pages_command = (void *)msg.msg;
+ command_vgpu_to_host_init2(&set_pages_command->hdr,
+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMPAGES,
+ device->process->host_handle);
+ set_pages_command->device = device->handle;
+ set_pages_command->allocation = host_alloc->allocation;
+
+ page_in = dxgalloc->pages;
+ while (alloc_offset_in_pages < npages) {
+ pfn = (u64 *)((char *)msg.msg +
+ sizeof(*set_pages_command));
+ pages_to_send = min(npages - alloc_offset_in_pages,
+ max_pfns_in_message);
+ set_pages_command->num_pages = pages_to_send;
+ set_pages_command->alloc_offset_in_pages =
+ alloc_offset_in_pages;
+
+ for (i = 0; i < pages_to_send; i++)
+ *pfn++ = page_to_pfn(*page_in++);
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel,
+ msg.hdr,
+ msg.size);
+ if (ret < 0) {
+ pr_err("failed to set existing pages: %x", ret);
+ break;
+ }
+ alloc_offset_in_pages += pages_to_send;
+ }
+ }
+
+cleanup:
+ if (kmem)
+ vunmap(kmem);
+ free_message(&msg, device->process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
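+/*
+ * Enter the host-returned resource and allocation handles into the process
+ * handle table under the exclusive lock, marking each guest object's handle
+ * as valid.
+ */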
+static int
+process_allocation_handles(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct d3dkmt_createallocation *args,
+ struct dxgkvmb_command_createallocation_return *res,
+ struct dxgallocation **dxgalloc,
+ struct dxgresource *resource)
+{
+ int ret = 0;
+ int i;
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ if (args->flags.create_resource) {
+ ret = hmgrtable_assign_handle(&process->handle_table, resource,
+ HMGRENTRY_TYPE_DXGRESOURCE,
+ res->resource);
+ if (ret < 0) {
+ pr_err("failed to assign resource handle %x",
+ res->resource.v);
+ } else {
+ resource->handle = res->resource;
+ resource->handle_valid = 1;
+ }
+ }
+ for (i = 0; i < args->alloc_count; i++) {
+ struct dxgkvmb_command_allocinfo_return *host_alloc;
+
+ host_alloc = &res->allocation_info[i];
+ ret = hmgrtable_assign_handle(&process->handle_table,
+ dxgalloc[i],
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ host_alloc->allocation);
+ if (ret < 0) {
+ pr_err("failed to assign alloc handle %x %d %d",
+ host_alloc->allocation.v,
+ args->alloc_count, i);
+ break;
+ }
+ dxgalloc[i]->alloc_handle = host_alloc->allocation;
+ dxgalloc[i]->handle_valid = 1;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
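+/*
+ * Finish guest-side bookkeeping after the host created the allocations:
+ * copy the new handles and private driver data back to user mode, attach
+ * existing sysmem backing stores, and register the handles. On failure the
+ * pre-built destroy-allocation command is sent so the host state is rolled
+ * back.
+ */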
+static int
+create_local_allocations(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct d3dkmt_createallocation *args,
+ struct d3dkmt_createallocation *__user input_args,
+ struct d3dddi_allocationinfo *alloc_info,
+ struct dxgkvmb_command_createallocation_return *result,
+ struct dxgresource *resource,
+ struct dxgallocation **dxgalloc,
+ u32 destroy_buffer_size)
+{
+ int i;
+ int alloc_count = args->alloc_count;
+ u8 *alloc_private_data = NULL;
+ int ret = 0;
+ int ret1;
+ struct dxgkvmb_command_destroyallocation *destroy_buf;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, device->adapter, process,
+ destroy_buffer_size);
+ if (ret)
+ goto cleanup;
+ destroy_buf = (void *)msg.msg;
+
+	/* Prepare the command to destroy the allocations in case of failure */
+ command_vgpu_to_host_init2(&destroy_buf->hdr,
+ DXGK_VMBCOMMAND_DESTROYALLOCATION,
+ process->host_handle);
+ destroy_buf->device = args->device;
+ destroy_buf->resource = args->resource;
+ destroy_buf->alloc_count = alloc_count;
+ destroy_buf->flags.assume_not_in_use = 1;
+ for (i = 0; i < alloc_count; i++) {
+ dev_dbg(dxgglobaldev, "host allocation: %d %x",
+ i, result->allocation_info[i].allocation.v);
+ destroy_buf->allocations[i] =
+ result->allocation_info[i].allocation;
+ }
+
+ if (args->flags.create_resource) {
+ dev_dbg(dxgglobaldev, "new resource: %x", result->resource.v);
+ ret = copy_to_user(&input_args->resource, &result->resource,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy resource handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ alloc_private_data = (u8 *) result +
+ sizeof(struct dxgkvmb_command_createallocation_return) +
+ sizeof(struct dxgkvmb_command_allocinfo_return) * (alloc_count - 1);
+
+ for (i = 0; i < alloc_count; i++) {
+ struct dxgkvmb_command_allocinfo_return *host_alloc;
+ struct d3dddi_allocationinfo *user_alloc;
+
+ host_alloc = &result->allocation_info[i];
+ user_alloc = &alloc_info[i];
+ dxgalloc[i]->num_pages =
+ host_alloc->allocation_size >> PAGE_SHIFT;
+ if (user_alloc->sysmem) {
+ ret = create_existing_sysmem(device, host_alloc,
+ dxgalloc[i],
+ args->flags.read_only != 0,
+ user_alloc->sysmem);
+ if (ret < 0) {
+ pr_err("create_existing_sysmem failed");
+ goto cleanup;
+ }
+ }
+ dxgalloc[i]->cached = host_alloc->allocation_flags.cached;
+ if (host_alloc->priv_drv_data_size) {
+ ret = copy_to_user(user_alloc->priv_drv_data,
+ alloc_private_data,
+ host_alloc->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy private data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ alloc_private_data += host_alloc->priv_drv_data_size;
+ }
+ ret = copy_to_user(&args->allocation_info[i].allocation,
+ &host_alloc->allocation,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy alloc handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = process_allocation_handles(process, device, args, result,
+ dxgalloc, resource);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(&input_args->global_share, &args->global_share,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy global share", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ /* Free local handles before freeing the handles in the host */
+ dxgdevice_acquire_alloc_list_lock(device);
+ if (dxgalloc)
+ for (i = 0; i < alloc_count; i++)
+ if (dxgalloc[i])
+ dxgallocation_free_handle(dxgalloc[i]);
+ if (resource && args->flags.create_resource)
+ dxgresource_free_handle(resource);
+ dxgdevice_release_alloc_list_lock(device);
+
+ /* Destroy allocations in the host to unmap gpadls */
+ ret1 = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr,
+ msg.size);
+ if (ret1 < 0)
+ pr_err("failed to destroy allocations: %x", ret1);
+
+ dxgdevice_acquire_alloc_list_lock(device);
+ if (dxgalloc) {
+ for (i = 0; i < alloc_count; i++) {
+ if (dxgalloc[i]) {
+ dxgalloc[i]->alloc_handle.v = 0;
+ dxgallocation_destroy(dxgalloc[i]);
+ dxgalloc[i] = NULL;
+ }
+ }
+ }
+ if (resource && args->flags.create_resource) {
+ /*
+			 * Prevent the resource memory from being freed here;
+			 * it is freed in the top-level function.
+ */
+ kref_get(&resource->resource_kref);
+ dxgresource_destroy(resource);
+ }
+ dxgdevice_release_alloc_list_lock(device);
+ }
+
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
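+/*
+ * Build and send the create-allocation command. All variable-size pieces are
+ * validated against DXG_MAX_VM_BUS_PACKET_SIZE, and the host reply (status,
+ * handles and per-allocation private data) is handed to
+ * create_local_allocations() to complete guest-side creation.
+ */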
+int dxgvmb_send_create_allocation(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct d3dkmt_createallocation *args,
+ struct d3dkmt_createallocation *__user
+ input_args,
+ struct dxgresource *resource,
+ struct dxgallocation **dxgalloc,
+ struct d3dddi_allocationinfo *alloc_info,
+ struct d3dkmt_createstandardallocation
+ *standard_alloc)
+{
+ struct dxgkvmb_command_createallocation *command = NULL;
+ struct dxgkvmb_command_createallocation_return *result = NULL;
+ int ret = -EINVAL;
+ int i;
+ u32 result_size = 0;
+ u32 cmd_size = 0;
+ u32 destroy_buffer_size = 0;
+ u32 priv_drv_data_size;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->private_runtime_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE ||
+ args->priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EOVERFLOW;
+ goto cleanup;
+ }
+
+ /*
+	 * Compute the size of the destroy-allocation buffer, which is used
+	 * for cleanup if creation fails
+ */
+ destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) +
+ args->alloc_count * sizeof(struct d3dkmthandle);
+
+ /* Compute the total private driver size */
+
+ priv_drv_data_size = 0;
+
+ for (i = 0; i < args->alloc_count; i++) {
+ if (alloc_info[i].priv_drv_data_size >=
+ DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EOVERFLOW;
+ goto cleanup;
+ } else {
+ priv_drv_data_size += alloc_info[i].priv_drv_data_size;
+ }
+ if (priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EOVERFLOW;
+ goto cleanup;
+ }
+ }
+
+ /*
+	 * Private driver data in the result includes only the per-allocation
+	 * private data
+ */
+ result_size = sizeof(struct dxgkvmb_command_createallocation_return) +
+ (args->alloc_count - 1) *
+ sizeof(struct dxgkvmb_command_allocinfo_return) +
+ priv_drv_data_size;
+ result = vzalloc(result_size);
+ if (result == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Private drv data for the command includes the global private data */
+ priv_drv_data_size += args->priv_drv_data_size;
+
+ cmd_size = sizeof(struct dxgkvmb_command_createallocation) +
+ args->alloc_count *
+ sizeof(struct dxgkvmb_command_createallocation_allocinfo) +
+ args->private_runtime_data_size + priv_drv_data_size;
+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EOVERFLOW;
+ goto cleanup;
+ }
+
+	dev_dbg(dxgglobaldev, "command size, driver_data_size %d %d %zu %zu",
+ cmd_size, priv_drv_data_size,
+ sizeof(struct dxgkvmb_command_createallocation),
+ sizeof(struct dxgkvmb_command_createallocation_allocinfo));
+
+ ret = init_message(&msg, device->adapter, process,
+ cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATEALLOCATION,
+ process->host_handle);
+ command->device = args->device;
+ command->flags = args->flags;
+ command->resource = args->resource;
+ command->private_runtime_resource_handle =
+ args->private_runtime_resource_handle;
+ command->alloc_count = args->alloc_count;
+ command->private_runtime_data_size = args->private_runtime_data_size;
+ command->priv_drv_data_size = args->priv_drv_data_size;
+
+ ret = copy_private_data(args, command, alloc_info, standard_alloc);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ result, result_size);
+ if (ret < 0) {
+ pr_err("send_create_allocation failed %x", ret);
+ goto cleanup;
+ }
+
+ ret = ntstatus2int(result->status);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = create_local_allocations(process, device, args, input_args,
+ alloc_info, result, resource, dxgalloc,
+ destroy_buffer_size);
+cleanup:
+
+ if (result)
+ vfree(result);
+ free_message(&msg, process);
+
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_destroy_allocation(struct dxgprocess *process,
+ struct dxgdevice *device,
+ struct d3dkmt_destroyallocation2 *args,
+ struct d3dkmthandle *alloc_handles)
+{
+ struct dxgkvmb_command_destroyallocation *destroy_buffer;
+ u32 destroy_buffer_size;
+ int ret;
+ int allocations_size = args->alloc_count * sizeof(struct d3dkmthandle);
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) +
+ allocations_size;
+
+ ret = init_message(&msg, device->adapter, process,
+ destroy_buffer_size);
+ if (ret)
+ goto cleanup;
+ destroy_buffer = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&destroy_buffer->hdr,
+ DXGK_VMBCOMMAND_DESTROYALLOCATION,
+ process->host_handle);
+ destroy_buffer->device = args->device;
+ destroy_buffer->resource = args->resource;
+ destroy_buffer->alloc_count = args->alloc_count;
+ destroy_buffer->flags = args->flags;
+ if (allocations_size)
+ memcpy(destroy_buffer->allocations, alloc_handles,
+ allocations_size);
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryclockcalibration
+ *args,
+ struct d3dkmt_queryclockcalibration
+ *__user inargs)
+{
+ struct dxgkvmb_command_queryclockcalibration *command;
+ struct dxgkvmb_command_queryclockcalibration_return result;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0)
+ goto cleanup;
+ ret = copy_to_user(&inargs->clock_data, &result.clock_data,
+ sizeof(result.clock_data));
+ if (ret) {
+ pr_err("%s failed to copy clock data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = ntstatus2int(result.status);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_flushheaptransitions *args)
+{
+ struct dxgkvmb_command_flushheaptransitions *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS,
+ process->host_handle);
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryallocationresidency
+ *args)
+{
+ int ret = -EINVAL;
+ struct dxgkvmb_command_queryallocationresidency *command = NULL;
+ u32 cmd_size = sizeof(*command);
+ u32 alloc_size = 0;
+ u32 result_allocation_size = 0;
+ struct dxgkvmb_command_queryallocationresidency_return *result = NULL;
+ u32 result_size = sizeof(*result);
+ struct dxgvmbusmsgres msg = {.hdr = NULL};
+
+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args->allocation_count) {
+ alloc_size = args->allocation_count *
+ sizeof(struct d3dkmthandle);
+ cmd_size += alloc_size;
+ result_allocation_size = args->allocation_count *
+ sizeof(args->residency_status[0]);
+ } else {
+ result_allocation_size = sizeof(args->residency_status[0]);
+ }
+ result_size += result_allocation_size;
+
+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ result = msg.res;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY,
+ process->host_handle);
+ command->args = *args;
+ if (alloc_size) {
+ ret = copy_from_user(&command[1], args->allocations,
+ alloc_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ result, msg.res_size);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result->status);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(args->residency_status, &result[1],
+ result_allocation_size);
+ if (ret) {
+ pr_err("%s failed to copy residency status", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ free_message((struct dxgvmbusmsg *)&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_escape(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_escape *args)
+{
+ int ret;
+ struct dxgkvmb_command_escape *command = NULL;
+ u32 cmd_size = sizeof(*command);
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ cmd_size = cmd_size - sizeof(args->priv_drv_data[0]) +
+ args->priv_drv_data_size;
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_ESCAPE,
+ process->host_handle);
+ command->adapter = args->adapter;
+ command->device = args->device;
+ command->type = args->type;
+ command->flags = args->flags;
+ command->priv_drv_data_size = args->priv_drv_data_size;
+ command->context = args->context;
+ if (args->priv_drv_data_size) {
+ ret = copy_from_user(command->priv_drv_data,
+ args->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy priv data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ command->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret < 0)
+ goto cleanup;
+
+ if (args->priv_drv_data_size) {
+ ret = copy_to_user(args->priv_drv_data,
+ command->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy priv data", __func__);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryvideomemoryinfo *args,
+ struct d3dkmt_queryvideomemoryinfo *__user
+ output)
+{
+ int ret;
+ struct dxgkvmb_command_queryvideomemoryinfo *command;
+ struct dxgkvmb_command_queryvideomemoryinfo_return result = { };
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ command_vgpu_to_host_init2(&command->hdr,
+ dxgk_vmbcommand_queryvideomemoryinfo,
+ process->host_handle);
+ command->adapter = args->adapter;
+ command->memory_segment_group = args->memory_segment_group;
+ command->physical_adapter_index = args->physical_adapter_index;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(&output->budget, &result.budget,
+ sizeof(output->budget));
+ if (ret) {
+ pr_err("%s failed to copy budget", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(&output->current_usage, &result.current_usage,
+ sizeof(output->current_usage));
+ if (ret) {
+ pr_err("%s failed to copy current usage", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(&output->current_reservation,
+ &result.current_reservation,
+ sizeof(output->current_reservation));
+ if (ret) {
+ pr_err("%s failed to copy reservation", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(&output->available_for_reservation,
+ &result.available_for_reservation,
+ sizeof(output->available_for_reservation));
+ if (ret) {
+ pr_err("%s failed to copy avail reservation", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_get_device_state(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_getdevicestate *args,
+ struct d3dkmt_getdevicestate *__user output)
+{
+ int ret;
+ struct dxgkvmb_command_getdevicestate *command;
+ struct dxgkvmb_command_getdevicestate_return result = { };
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ dxgk_vmbcommand_getdevicestate,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result.status);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(output, &result.args, sizeof(result.args));
+ if (ret) {
+ pr_err("%s failed to copy output args", __func__);
+ ret = -EINVAL;
+ }
+
+ if (args->state_type == _D3DKMT_DEVICESTATE_EXECUTION)
+ args->execution_state = result.args.execution_state;
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_open_resource(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle device,
+ struct d3dkmthandle global_share,
+ u32 allocation_count,
+ u32 total_priv_drv_data_size,
+ struct d3dkmthandle *resource_handle,
+ struct d3dkmthandle *alloc_handles)
+{
+ struct dxgkvmb_command_openresource *command;
+ struct dxgkvmb_command_openresource_return *result;
+ struct d3dkmthandle *handles;
+ int ret;
+ int i;
+ u32 result_size = allocation_count * sizeof(struct d3dkmthandle) +
+ sizeof(*result);
+ struct dxgvmbusmsgres msg = {.hdr = NULL};
+
+ ret = init_message_res(&msg, adapter, process, sizeof(*command),
+ result_size);
+ if (ret)
+ goto cleanup;
+ command = msg.msg;
+ result = msg.res;
+
+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENRESOURCE,
+ process->host_handle);
+ command->device = device;
+ command->nt_security_sharing = 1;
+ command->global_share = global_share;
+ command->allocation_count = allocation_count;
+ command->total_priv_drv_data_size = total_priv_drv_data_size;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ result, msg.res_size);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result->status);
+ if (ret < 0)
+ goto cleanup;
+
+ *resource_handle = result->resource;
+ handles = (struct d3dkmthandle *) &result[1];
+ for (i = 0; i < allocation_count; i++)
+ alloc_handles[i] = handles[i];
+
+cleanup:
+ free_message((struct dxgvmbusmsg *)&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device,
+ enum d3dkmdt_standardallocationtype alloctype,
+ struct d3dkmdt_gdisurfacedata *alloc_data,
+ u32 physical_adapter_index,
+ u32 *alloc_priv_driver_size,
+ void *priv_alloc_data,
+ u32 *res_priv_data_size,
+ void *priv_res_data)
+{
+ struct dxgkvmb_command_getstandardallocprivdata *command;
+ struct dxgkvmb_command_getstandardallocprivdata_return *result = NULL;
+ u32 result_size = sizeof(*result);
+ int ret;
+ struct dxgvmbusmsgres msg = {.hdr = NULL};
+
+ if (priv_alloc_data)
+ result_size += *alloc_priv_driver_size;
+ if (priv_res_data)
+ result_size += *res_priv_data_size;
+ ret = init_message_res(&msg, device->adapter, device->process,
+ sizeof(*command), result_size);
+ if (ret)
+ goto cleanup;
+ command = msg.msg;
+ result = msg.res;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA,
+ device->process->host_handle);
+
+ command->alloc_type = alloctype;
+ command->priv_driver_data_size = *alloc_priv_driver_size;
+ command->physical_adapter_index = physical_adapter_index;
+ command->priv_driver_resource_size = *res_priv_data_size;
+ switch (alloctype) {
+ case _D3DKMDT_STANDARDALLOCATION_GDISURFACE:
+ command->gdi_surface = *alloc_data;
+ break;
+ case _D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE:
+ case _D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE:
+ case _D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE:
+ default:
+		pr_err("Invalid standard alloc type");
+		ret = -EINVAL;
+		goto cleanup;
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ result, msg.res_size);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result->status);
+ if (ret < 0)
+ goto cleanup;
+
+ if (*alloc_priv_driver_size &&
+ result->priv_driver_data_size != *alloc_priv_driver_size) {
+		pr_err("Priv data size mismatch");
+		ret = -EINVAL;
+		goto cleanup;
+ }
+ if (*res_priv_data_size &&
+ result->priv_driver_resource_size != *res_priv_data_size) {
+		pr_err("Resource priv data size mismatch");
+		ret = -EINVAL;
+		goto cleanup;
+ }
+ *alloc_priv_driver_size = result->priv_driver_data_size;
+ *res_priv_data_size = result->priv_driver_resource_size;
+ if (priv_alloc_data) {
+ memcpy(priv_alloc_data, &result[1],
+ result->priv_driver_data_size);
+ }
+ if (priv_res_data) {
+ memcpy(priv_res_data,
+ (char *)(&result[1]) + result->priv_driver_data_size,
+ result->priv_driver_resource_size);
+ }
+
+cleanup:
+
+ free_message((struct dxgvmbusmsg *)&msg, device->process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_make_resident(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dddi_makeresident *args)
+{
+ int ret;
+ u32 cmd_size;
+ struct dxgkvmb_command_makeresident_return result = { };
+ struct dxgkvmb_command_makeresident *command = NULL;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ cmd_size = (args->alloc_count - 1) * sizeof(struct d3dkmthandle) +
+ sizeof(struct dxgkvmb_command_makeresident);
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ ret = copy_from_user(command->allocations, args->allocation_list,
+ args->alloc_count *
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy alloc handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_MAKERESIDENT,
+ process->host_handle);
+ command->alloc_count = args->alloc_count;
+ command->paging_queue = args->paging_queue;
+ command->flags = args->flags;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0) {
+ pr_err("send_make_resident failed %x", ret);
+ goto cleanup;
+ }
+
+ args->paging_fence_value = result.paging_fence_value;
+ args->num_bytes_to_trim = result.num_bytes_to_trim;
+ ret = ntstatus2int(result.status);
+
+cleanup:
+
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_evict(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_evict *args)
+{
+ int ret;
+ u32 cmd_size;
+ struct dxgkvmb_command_evict_return result = { };
+ struct dxgkvmb_command_evict *command = NULL;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ cmd_size = (args->alloc_count - 1) * sizeof(struct d3dkmthandle) +
+ sizeof(struct dxgkvmb_command_evict);
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ ret = copy_from_user(command->allocations, args->allocations,
+ args->alloc_count *
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy alloc handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_EVICT, process->host_handle);
+ command->alloc_count = args->alloc_count;
+ command->device = args->device;
+ command->flags = args->flags;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0) {
+ pr_err("send_evict failed %x", ret);
+ goto cleanup;
+ }
+ args->num_bytes_to_trim = result.num_bytes_to_trim;
+
+cleanup:
+
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_submit_command(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_submitcommand *args)
+{
+ int ret;
+ u32 cmd_size;
+ struct dxgkvmb_command_submitcommand *command;
+ u32 hbufsize = args->num_history_buffers * sizeof(struct d3dkmthandle);
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ cmd_size = sizeof(struct dxgkvmb_command_submitcommand) +
+ hbufsize + args->priv_drv_data_size;
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ ret = copy_from_user(&command[1], args->history_buffer_array,
+ hbufsize);
+ if (ret) {
+ pr_err("%s failed to copy history buffer", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_from_user((u8 *) &command[1] + hbufsize,
+ args->priv_drv_data, args->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy history priv data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_SUBMITCOMMAND,
+ process->host_handle);
+ command->args = *args;
+
+ if (dxgglobal->async_msg_enabled) {
+ command->hdr.async_msg = 1;
+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size);
+ } else {
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr,
+ msg.size);
+ }
+
+cleanup:
+
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_map_gpu_va(struct dxgprocess *process,
+ struct d3dkmthandle device,
+ struct dxgadapter *adapter,
+ struct d3dddi_mapgpuvirtualaddress *args)
+{
+ struct dxgkvmb_command_mapgpuvirtualaddress *command;
+ struct dxgkvmb_command_mapgpuvirtualaddress_return result;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS,
+ process->host_handle);
+ command->args = *args;
+ command->device = device;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result,
+ sizeof(result));
+ if (ret < 0)
+ goto cleanup;
+ args->virtual_address = result.virtual_address;
+ args->paging_fence_value = result.paging_fence_value;
+ ret = ntstatus2int(result.status);
+
+cleanup:
+
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dddi_reservegpuvirtualaddress *args)
+{
+ struct dxgkvmb_command_reservegpuvirtualaddress *command;
+ struct dxgkvmb_command_reservegpuvirtualaddress_return result;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result,
+ sizeof(result));
+ args->virtual_address = result.virtual_address;
+ ret = ntstatus2int(result.status);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_free_gpu_va(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_freegpuvirtualaddress *args)
+{
+ struct dxgkvmb_command_freegpuvirtualaddress *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_update_gpu_va(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_updategpuvirtualaddress *args)
+{
+ struct dxgkvmb_command_updategpuvirtualaddress *command;
+ u32 cmd_size;
+ u32 op_size;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->num_operations == 0 ||
+ (DXG_MAX_VM_BUS_PACKET_SIZE /
+ sizeof(struct d3dddi_updategpuvirtualaddress_operation)) <
+ args->num_operations) {
+ ret = -EINVAL;
+ pr_err("Invalid number of operations: %d",
+ args->num_operations);
+ goto cleanup;
+ }
+
+ op_size = args->num_operations *
+ sizeof(struct d3dddi_updategpuvirtualaddress_operation);
+ cmd_size = sizeof(struct dxgkvmb_command_updategpuvirtualaddress) +
+ op_size - sizeof(args->operations[0]);
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS,
+ process->host_handle);
+ command->fence_value = args->fence_value;
+ command->device = args->device;
+ command->context = args->context;
+ command->fence_object = args->fence_object;
+ command->num_operations = args->num_operations;
+ command->flags = args->flags.value;
+ ret = copy_from_user(command->operations, args->operations,
+ op_size);
+ if (ret) {
+ pr_err("%s failed to copy operations", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+static void set_result(struct d3dkmt_createsynchronizationobject2 *args,
+ u64 fence_gpu_va, u8 *va)
+{
+ args->info.periodic_monitored_fence.fence_gpu_virtual_address =
+ fence_gpu_va;
+ args->info.periodic_monitored_fence.fence_cpu_virtual_address = va;
+}
+
+int
+dxgvmb_send_create_sync_object(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_createsynchronizationobject2 *args,
+ struct dxgsyncobject *syncobj)
+{
+ struct dxgkvmb_command_createsyncobject_return result = { };
+ struct dxgkvmb_command_createsyncobject *command;
+ int ret;
+	u8 *va = NULL;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATESYNCOBJECT,
+ process->host_handle);
+ command->args = *args;
+ command->client_hint = 1; /* CLIENTHINT_UMD */
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result,
+ sizeof(result));
+ if (ret < 0) {
+ pr_err("%s failed %d", __func__, ret);
+ goto cleanup;
+ }
+ args->sync_object = result.sync_object;
+ if (syncobj->shared)
+ args->info.shared_handle = result.global_sync_object;
+
+ if (syncobj->monitored_fence) {
+ va = dxg_map_iospace(result.fence_storage_address, PAGE_SIZE,
+ PROT_READ | PROT_WRITE, true);
+ if (va == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ if (args->info.type == _D3DDDI_MONITORED_FENCE) {
+ args->info.monitored_fence.fence_gpu_virtual_address =
+ result.fence_gpu_va;
+ args->info.monitored_fence.fence_cpu_virtual_address =
+ va;
+ {
+ unsigned long value;
+
+ dev_dbg(dxgglobaldev, "fence cpu va: %p", va);
+ ret = copy_from_user(&value, va,
+ sizeof(u64));
+ if (ret) {
+ pr_err("failed to read fence");
+ ret = -EINVAL;
+ } else {
+ dev_dbg(dxgglobaldev, "fence value:%lx",
+ value);
+ }
+ }
+ } else {
+ set_result(args, result.fence_gpu_va, va);
+ }
+ syncobj->mapped_address = va;
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
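+/*
+ * The fixed command is followed by the object handles, an optional extra
+ * context handle (when 'context' is non-zero), the context handles and the
+ * fence values. The message is sent asynchronously when the host supports
+ * async messages.
+ */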
+int dxgvmb_send_signal_sync_object(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dddicb_signalflags flags,
+ u64 legacy_fence_value,
+ struct d3dkmthandle context,
+ u32 object_count,
+ struct d3dkmthandle __user *objects,
+ u32 context_count,
+ struct d3dkmthandle __user *contexts,
+ u32 fence_count,
+ u64 __user *fences,
+ struct eventfd_ctx *cpu_event_handle,
+ struct d3dkmthandle device)
+{
+ int ret;
+ struct dxgkvmb_command_signalsyncobject *command;
+ u32 object_size = object_count * sizeof(struct d3dkmthandle);
+ u32 context_size = context_count * sizeof(struct d3dkmthandle);
+ u32 fence_size = fences ? fence_count * sizeof(u64) : 0;
+ u8 *current_pos;
+ u32 cmd_size = sizeof(struct dxgkvmb_command_signalsyncobject) +
+ object_size + context_size + fence_size;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (context.v)
+ cmd_size += sizeof(struct d3dkmthandle);
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_SIGNALSYNCOBJECT,
+ process->host_handle);
+
+ if (flags.enqueue_cpu_event)
+ command->cpu_event_handle = (u64) cpu_event_handle;
+ else
+ command->device = device;
+ command->flags = flags;
+ command->fence_value = legacy_fence_value;
+ command->object_count = object_count;
+ command->context_count = context_count;
+ current_pos = (u8 *) &command[1];
+ ret = copy_from_user(current_pos, objects, object_size);
+ if (ret) {
+ pr_err("Failed to read objects %p %d",
+ objects, object_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ current_pos += object_size;
+ if (context.v) {
+ command->context_count++;
+ *(struct d3dkmthandle *) current_pos = context;
+ current_pos += sizeof(struct d3dkmthandle);
+ }
+ if (context_size) {
+ ret = copy_from_user(current_pos, contexts, context_size);
+ if (ret) {
+ pr_err("Failed to read contexts %p %d",
+ contexts, context_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ current_pos += context_size;
+ }
+ if (fence_size) {
+ ret = copy_from_user(current_pos, fences, fence_size);
+ if (ret) {
+ pr_err("Failed to read fences %p %d",
+ fences, fence_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ if (dxgglobal->async_msg_enabled) {
+ command->hdr.async_msg = 1;
+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size);
+ } else {
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr,
+ msg.size);
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct
+ d3dkmt_waitforsynchronizationobjectfromcpu
+ *args,
+ bool user_address,
+ u64 cpu_event)
+{
+ int ret = -EINVAL;
+ struct dxgkvmb_command_waitforsyncobjectfromcpu *command;
+ u32 object_size = args->object_count * sizeof(struct d3dkmthandle);
+ u32 fence_size = args->object_count * sizeof(u64);
+ u8 *current_pos;
+ u32 cmd_size = sizeof(*command) + object_size + fence_size;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU,
+ process->host_handle);
+ command->device = args->device;
+ command->flags = args->flags;
+ command->object_count = args->object_count;
+ command->guest_event_pointer = (u64) cpu_event;
+ current_pos = (u8 *) &command[1];
+ if (user_address) {
+ ret = copy_from_user(current_pos, args->objects, object_size);
+ if (ret) {
+ pr_err("%s failed to copy objects", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ current_pos += object_size;
+ ret = copy_from_user(current_pos, args->fence_values,
+ fence_size);
+ if (ret) {
+ pr_err("%s failed to copy fences", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ } else {
+ memcpy(current_pos, args->objects, object_size);
+ current_pos += object_size;
+ memcpy(current_pos, args->fence_values, fence_size);
+ }
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle context,
+ u32 object_count,
+ struct d3dkmthandle *objects,
+ u64 *fences,
+ bool legacy_fence)
+{
+ int ret;
+ struct dxgkvmb_command_waitforsyncobjectfromgpu *command;
+ u32 fence_size = object_count * sizeof(u64);
+ u32 object_size = object_count * sizeof(struct d3dkmthandle);
+ u8 *current_pos;
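+	/*
+	 * The command struct already ends with fence_values[1], so one u64
+	 * is subtracted from the variable part of the message size.
+	 */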
+ u32 cmd_size = object_size + fence_size - sizeof(u64) +
+ sizeof(struct dxgkvmb_command_waitforsyncobjectfromgpu);
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (object_count == 0 || object_count > D3DDDI_MAX_OBJECT_WAITED_ON) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU,
+ process->host_handle);
+ command->context = context;
+ command->object_count = object_count;
+ command->legacy_fence_object = legacy_fence;
+ current_pos = (u8 *) command->fence_values;
+ memcpy(current_pos, fences, fence_size);
+ current_pos += fence_size;
+ memcpy(current_pos, objects, object_size);
+
+ if (dxgglobal->async_msg_enabled) {
+ command->hdr.async_msg = 1;
+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size);
+ } else {
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr,
+ msg.size);
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_lock2(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_lock2 *args,
+ struct d3dkmt_lock2 *__user outargs)
+{
+ int ret;
+ struct dxgkvmb_command_lock2 *command;
+ struct dxgkvmb_command_lock2_return result = { };
+ struct dxgallocation *alloc = NULL;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_LOCK2, process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result.status);
+ if (ret < 0)
+ goto cleanup;
+
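+	/*
+	 * Cache the CPU mapping in the allocation so repeated lock2 calls
+	 * can reuse it; cpu_address_refcount tracks outstanding mappings.
+	 */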
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ alloc = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ args->allocation, true);
+ if (alloc == NULL) {
+ pr_err("%s invalid alloc", __func__);
+ ret = -EINVAL;
+ } else {
+ if (alloc->cpu_address) {
+ args->data = alloc->cpu_address;
+ if (alloc->cpu_address_mapped)
+ alloc->cpu_address_refcount++;
+ } else {
+ u64 offset = (u64)result.cpu_visible_buffer_offset;
+
+ args->data = dxg_map_iospace(offset,
+ alloc->num_pages << PAGE_SHIFT,
+ PROT_READ | PROT_WRITE, alloc->cached);
+ if (args->data) {
+ alloc->cpu_address_refcount = 1;
+ alloc->cpu_address_mapped = true;
+ alloc->cpu_address = args->data;
+ }
+ }
+ if (args->data == NULL) {
+ ret = -ENOMEM;
+ } else {
+ ret = copy_to_user(&outargs->data, &args->data,
+ sizeof(args->data));
+ if (ret) {
+ pr_err("%s failed to copy data", __func__);
+ ret = -EINVAL;
+ alloc->cpu_address_refcount--;
+ if (alloc->cpu_address_refcount == 0) {
+ dxg_unmap_iospace(alloc->cpu_address,
+ alloc->num_pages << PAGE_SHIFT);
+ alloc->cpu_address_mapped = false;
+ alloc->cpu_address = NULL;
+ }
+ }
+ }
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_unlock2(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_unlock2 *args)
+{
+ int ret;
+ struct dxgkvmb_command_unlock2 *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_UNLOCK2,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_update_alloc_property(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dddi_updateallocproperty *args,
+ struct d3dddi_updateallocproperty *__user
+ inargs)
+{
+ int ret;
+ int ret1;
+ struct dxgkvmb_command_updateallocationproperty *command;
+ struct dxgkvmb_command_updateallocationproperty_return result = { };
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+
+ if (ret < 0)
+ goto cleanup;
+ ret = ntstatus2int(result.status);
+	/* STATUS_PENDING is a success code > 0 */
+ if (ret == STATUS_PENDING) {
+ ret1 = copy_to_user(&inargs->paging_fence_value,
+ &result.paging_fence_value,
+ sizeof(u64));
+ if (ret1) {
+ pr_err("%s failed to copy paging fence", __func__);
+ ret = -EINVAL;
+ }
+ }
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_markdeviceaserror *args)
+{
+ struct dxgkvmb_command_markdeviceaserror *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_MARKDEVICEASERROR,
+ process->host_handle);
+ command->args = *args;
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_setallocationpriority *args)
+{
+ u32 cmd_size = sizeof(struct dxgkvmb_command_setallocationpriority);
+ u32 alloc_size = 0;
+ u32 priority_size = 0;
+ struct dxgkvmb_command_setallocationpriority *command;
+ int ret;
+ struct d3dkmthandle *allocations;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (args->resource.v) {
+ priority_size = sizeof(u32);
+ if (args->allocation_count != 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ } else {
+ if (args->allocation_count == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ alloc_size = args->allocation_count *
+ sizeof(struct d3dkmthandle);
+ cmd_size += alloc_size;
+ priority_size = sizeof(u32) * args->allocation_count;
+ }
+ cmd_size += priority_size;
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY,
+ process->host_handle);
+ command->device = args->device;
+ command->allocation_count = args->allocation_count;
+ command->resource = args->resource;
+ allocations = (struct d3dkmthandle *) &command[1];
+ ret = copy_from_user(allocations, args->allocation_list,
+ alloc_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_from_user((u8 *) allocations + alloc_size,
+ args->priorities, priority_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc priority", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_getallocationpriority *args)
+{
+ u32 cmd_size = sizeof(struct dxgkvmb_command_getallocationpriority);
+ u32 result_size;
+ u32 alloc_size = 0;
+ u32 priority_size = 0;
+ struct dxgkvmb_command_getallocationpriority *command;
+ struct dxgkvmb_command_getallocationpriority_return *result;
+ int ret;
+ struct d3dkmthandle *allocations;
+ struct dxgvmbusmsgres msg = {.hdr = NULL};
+
+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (args->resource.v) {
+ priority_size = sizeof(u32);
+ if (args->allocation_count != 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ } else {
+ if (args->allocation_count == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ alloc_size = args->allocation_count *
+ sizeof(struct d3dkmthandle);
+ cmd_size += alloc_size;
+ priority_size = sizeof(u32) * args->allocation_count;
+ }
+ result_size = sizeof(*result) + priority_size;
+
+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ result = msg.res;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY,
+ process->host_handle);
+ command->device = args->device;
+ command->allocation_count = args->allocation_count;
+ command->resource = args->resource;
+ allocations = (struct d3dkmthandle *) &command[1];
+ ret = copy_from_user(allocations, args->allocation_list,
+ alloc_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr,
+ msg.size + msg.res_size,
+ result, msg.res_size);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result->status);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(args->priorities,
+ (u8 *) result + sizeof(*result),
+ priority_size);
+ if (ret) {
+ pr_err("%s failed to copy priorities", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ free_message((struct dxgvmbusmsg *)&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle context,
+ int priority,
+ bool in_process)
+{
+ struct dxgkvmb_command_setcontextschedulingpriority2 *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY,
+ process->host_handle);
+ command->context = context;
+ command->priority = priority;
+ command->in_process = in_process;
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle context,
+ int *priority,
+ bool in_process)
+{
+ struct dxgkvmb_command_getcontextschedulingpriority *command;
+ struct dxgkvmb_command_getcontextschedulingpriority_return result = { };
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY,
+ process->host_handle);
+ command->context = context;
+ command->in_process = in_process;
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+ if (ret >= 0) {
+ ret = ntstatus2int(result.status);
+ *priority = result.priority;
+ }
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_offer_allocations(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_offerallocations *args)
+{
+ struct dxgkvmb_command_offerallocations *command;
+ int ret = -EINVAL;
+ u32 alloc_size = sizeof(struct d3dkmthandle) * args->allocation_count;
+ u32 cmd_size = sizeof(struct dxgkvmb_command_offerallocations) +
+ alloc_size - sizeof(struct d3dkmthandle);
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_OFFERALLOCATIONS,
+ process->host_handle);
+ command->flags = args->flags;
+ command->priority = args->priority;
+ command->device = args->device;
+ command->allocation_count = args->allocation_count;
+ if (args->resources) {
+ command->resources = true;
+ ret = copy_from_user(command->allocations, args->resources,
+ alloc_size);
+ } else {
+ ret = copy_from_user(command->allocations,
+ args->allocations, alloc_size);
+ }
+ if (ret) {
+ pr_err("%s failed to copy input handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle device,
+ struct d3dkmt_reclaimallocations2 *args,
+ u64 __user *paging_fence_value)
+{
+ struct dxgkvmb_command_reclaimallocations *command;
+ struct dxgkvmb_command_reclaimallocations_return *result;
+ int ret;
+ u32 alloc_size = sizeof(struct d3dkmthandle) * args->allocation_count;
+ u32 cmd_size = sizeof(struct dxgkvmb_command_reclaimallocations) +
+ alloc_size - sizeof(struct d3dkmthandle);
+ u32 result_size = sizeof(*result);
+ struct dxgvmbusmsgres msg = {.hdr = NULL};
+
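+	/*
+	 * The return struct embeds one discarded[] entry; when per-allocation
+	 * results are requested, room for allocation_count - 1 more is added.
+	 */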
+ if (args->results)
+ result_size += (args->allocation_count - 1) *
+ sizeof(enum d3dddi_reclaim_result);
+
+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+ result = msg.res;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_RECLAIMALLOCATIONS,
+ process->host_handle);
+ command->device = device;
+ command->paging_queue = args->paging_queue;
+ command->allocation_count = args->allocation_count;
+ command->write_results = args->results != NULL;
+ if (args->resources) {
+ command->resources = true;
+ ret = copy_from_user(command->allocations, args->resources,
+ alloc_size);
+ } else {
+ ret = copy_from_user(command->allocations,
+ args->allocations, alloc_size);
+ }
+ if (ret) {
+ pr_err("%s failed to copy input handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ result, msg.res_size);
+ if (ret < 0)
+ goto cleanup;
+ ret = copy_to_user(paging_fence_value,
+ &result->paging_fence_value, sizeof(u64));
+ if (ret) {
+ pr_err("%s failed to copy paging fence", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = ntstatus2int(result->status);
+ if (NT_SUCCESS(result->status) && args->results) {
+ ret = copy_to_user(args->results, result->discarded,
+ sizeof(result->discarded[0]) *
+ args->allocation_count);
+ if (ret) {
+ pr_err("%s failed to copy results", __func__);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ free_message((struct dxgvmbusmsg *)&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle other_process,
+ struct
+ d3dkmt_changevideomemoryreservation
+ *args)
+{
+ struct dxgkvmb_command_changevideomemoryreservation *command;
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION,
+ process->host_handle);
+ command->args = *args;
+ command->args.process = other_process.v;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_createhwqueue *args,
+ struct d3dkmt_createhwqueue *__user inargs,
+ struct dxghwqueue *hwqueue)
+{
+ struct dxgkvmb_command_createhwqueue *command = NULL;
+ u32 cmd_size = sizeof(struct dxgkvmb_command_createhwqueue);
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("invalid private driver data size");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
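+	/* The struct already includes priv_drv_data[1], hence the -1 below */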
+ if (args->priv_drv_data_size)
+ cmd_size += args->priv_drv_data_size - 1;
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATEHWQUEUE,
+ process->host_handle);
+ command->context = args->context;
+ command->flags = args->flags;
+ command->priv_drv_data_size = args->priv_drv_data_size;
+ if (args->priv_drv_data_size) {
+ ret = copy_from_user(command->priv_drv_data,
+ args->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy private data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ command, cmd_size);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(command->status);
+ if (ret < 0) {
+ pr_err("dxgvmb_send_sync_msg failed: %x", command->status.v);
+ goto cleanup;
+ }
+
+ ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ command->hwqueue);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = hmgrtable_assign_handle_safe(&process->handle_table,
+ NULL,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ command->hwqueue_progress_fence);
+ if (ret < 0)
+ goto cleanup;
+
+ hwqueue->handle = command->hwqueue;
+ hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence;
+
+ hwqueue->progress_fence_mapped_address =
+ dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva,
+ PAGE_SIZE, PROT_READ | PROT_WRITE, true);
+ if (hwqueue->progress_fence_mapped_address == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = copy_to_user(&inargs->queue, &command->hwqueue,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy hwqueue handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(&inargs->queue_progress_fence,
+ &command->hwqueue_progress_fence,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+		pr_err("%s failed to copy progress fence handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(&inargs->queue_progress_fence_cpu_va,
+ &hwqueue->progress_fence_mapped_address,
+ sizeof(inargs->queue_progress_fence_cpu_va));
+ if (ret) {
+ pr_err("%s failed to copy fence cpu va", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(&inargs->queue_progress_fence_gpu_va,
+ &command->hwqueue_progress_fence_gpuva,
+ sizeof(u64));
+ if (ret) {
+ pr_err("%s failed to copy fence gpu va", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (args->priv_drv_data_size) {
+ ret = copy_to_user(args->priv_drv_data,
+ command->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy private data", __func__);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ if (ret < 0) {
+		pr_err("%s failed %d", __func__, ret);
+ if (hwqueue->handle.v) {
+ hmgrtable_free_handle_safe(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ hwqueue->handle);
+ hwqueue->handle.v = 0;
+ }
+ if (command && command->hwqueue.v)
+ dxgvmb_send_destroy_hwqueue(process, adapter,
+ command->hwqueue);
+ }
+ free_message(&msg, process);
+ return ret;
+}
+
+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle handle)
+{
+ int ret;
+ struct dxgkvmb_command_destroyhwqueue *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_DESTROYHWQUEUE,
+ process->host_handle);
+ command->hwqueue = handle;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_queryadapterinfo *args)
+{
+ struct dxgkvmb_command_queryadapterinfo *command;
+ u32 cmd_size = sizeof(*command) + args->private_data_size - 1;
+ int ret;
+ u32 private_data_size;
+ void *private_data;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ ret = copy_from_user(command->private_data,
+ args->private_data, args->private_data_size);
+ if (ret) {
+		pr_err("%s Failed to copy private data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_QUERYADAPTERINFO,
+ process->host_handle);
+ command->private_data_size = args->private_data_size;
+ command->query_type = args->type;
+
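+	/*
+	 * With newer VM bus interface versions the host prepends an ntstatus
+	 * to the returned private data, so the message buffer is reused to
+	 * receive both and the status is skipped below.
+	 */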
+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) {
+ private_data = msg.msg;
+ private_data_size = command->private_data_size +
+ sizeof(struct ntstatus);
+ } else {
+ private_data = command->private_data;
+ private_data_size = command->private_data_size;
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ private_data, private_data_size);
+ if (ret < 0)
+ goto cleanup;
+
+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) {
+ ret = ntstatus2int(*(struct ntstatus *)private_data);
+ if (ret < 0)
+ goto cleanup;
+ private_data = (char *)private_data + sizeof(struct ntstatus);
+ }
+
+ switch (args->type) {
+ case _KMTQAITYPE_ADAPTERTYPE:
+ case _KMTQAITYPE_ADAPTERTYPE_RENDER:
+ {
+ struct d3dkmt_adaptertype *adapter_type =
+ (void *)private_data;
+ adapter_type->paravirtualized = 1;
+ adapter_type->display_supported = 0;
+ adapter_type->post_device = 0;
+ adapter_type->indirect_display_device = 0;
+ adapter_type->acg_supported = 0;
+ adapter_type->support_set_timings_from_vidpn = 0;
+ break;
+ }
+ default:
+ break;
+ }
+ ret = copy_to_user(args->private_data, private_data,
+ args->private_data_size);
+ if (ret) {
+		pr_err("%s Failed to copy private data to user", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_submitcommandtohwqueue
+ *args)
+{
+ int ret = -EINVAL;
+ u32 cmd_size;
+ struct dxgkvmb_command_submitcommandtohwqueue *command;
+ u32 primaries_size = args->num_primaries * sizeof(struct d3dkmthandle);
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ cmd_size = sizeof(*command) + args->priv_drv_data_size + primaries_size;
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ if (primaries_size) {
+ ret = copy_from_user(&command[1], args->written_primaries,
+ primaries_size);
+ if (ret) {
+ pr_err("%s failed to copy primaries handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ if (args->priv_drv_data_size) {
+ ret = copy_from_user((char *)&command[1] + primaries_size,
+ args->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+			pr_err("%s failed to copy private data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE,
+ process->host_handle);
+ command->args = *args;
+
+ if (dxgglobal->async_msg_enabled) {
+ command->hdr.async_msg = 1;
+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size);
+ } else {
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr,
+ msg.size);
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+int dxgvmb_send_query_statistics(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_querystatistics *args)
+{
+ struct dxgkvmb_command_querystatistics *command;
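+	/*
+	 * A context handle passed in 'context' is prepended to the context
+	 * array and accounted for in context_count.
+	 */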
+ struct dxgkvmb_command_querystatistics_return *result;
+ int ret;
+ struct dxgvmbusmsgres msg = {.hdr = NULL};
+
+ ret = init_message_res(&msg, adapter, process, sizeof(*command),
+ sizeof(*result));
+ if (ret)
+ goto cleanup;
+ command = msg.msg;
+ result = msg.res;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_QUERYSTATISTICS,
+ process->host_handle);
+ command->args = *args;
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ result, msg.res_size);
+ if (ret < 0)
+ goto cleanup;
+
+ args->result = result->result;
+ ret = ntstatus2int(result->status);
+
+cleanup:
+ free_message((struct dxgvmbusmsg *)&msg, process);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
diff --git a/virtio_dxgkrnl/dxgvmbus.h b/virtio_dxgkrnl/dxgvmbus.h
new file mode 100644
index 0000000..1ed10c0
--- /dev/null
+++ b/virtio_dxgkrnl/dxgvmbus.h
@@ -0,0 +1,953 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * VM bus interface with the host definitions
+ *
+ */
+
+#ifndef _DXGVMBUS_H
+#define _DXGVMBUS_H
+
+struct dxgprocess;
+struct dxgadapter;
+
+#define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128)
+#define DXG_MAX_OBJECT_COUNT 0xFFF
+
+#define DXGK_DECL_VMBUS_OUTPUTSIZE(Type)\
+	((sizeof(Type) + 0x7) & ~(u32)0x7)
+#define DXGK_DECL_VMBUS_ALIGN_FOR_OUTPUT(Size) (((Size) + 0x7) & ~(u32)0x7)
+/*
+ * Defines a structure whose size is a multiple of 8 bytes.
+ */
+#define DXGK_DECL_ALIGNED8_STRUCT(Type, Name, OutputSize) \
+	const u32 _Size = DXGK_DECL_VMBUS_OUTPUTSIZE(Type); \
+	u8 _AlignedStruct[_Size]; \
+	Type *Name = (Type *)_AlignedStruct; \
+	u32 OutputSize = _Size
+
+#define DXGK_BUFFER_VMBUS_ALIGNED(Buffer) (((Buffer) & 7) == 0)
+
+
+#define RING_BUFSIZE (256 * 1024)
+
+/*
+ * The structure is used to track VM bus packets waiting for completion.
+ */
+struct dxgvmbuspacket {
+ struct list_head packet_list_entry;
+ u64 request_id;
+ struct completion wait;
+ void *buffer;
+ u32 buffer_length;
+ int status;
+ bool completed;
+};
+
+struct dxgvmb_ext_header {
+ /* Offset from the start of the message to DXGKVMB_COMMAND_BASE */
+ u32 command_offset;
+ u32 reserved;
+ struct winluid vgpu_luid;
+};
+
+#define VMBUSMESSAGEONSTACK 0
+
+struct dxgvmbusmsg {
+/* Points to the allocated buffer */
+ struct dxgvmb_ext_header *hdr;
+/* Points to dxgkvmb_command_vm_to_host or dxgkvmb_command_vgpu_to_host */
+ void *msg;
+/* The VM bus channel used to pass the message to the host */
+ struct dxgvmbuschannel *channel;
+/* Message size in bytes including the header and the payload */
+ u32 size;
+/* Buffer used for small messages */
+ char msg_on_stack[VMBUSMESSAGEONSTACK];
+};
+
+struct dxgvmbusmsgres {
+/* Points to the allocated buffer */
+ struct dxgvmb_ext_header *hdr;
+/* Points to dxgkvmb_command_vm_to_host or dxgkvmb_command_vgpu_to_host */
+ void *msg;
+/* The VM bus channel used to pass the message to the host */
+ struct dxgvmbuschannel *channel;
+/* Message size in bytes including the header, the payload and the result */
+ u32 size;
+/* Result buffer size in bytes */
+ u32 res_size;
+/* Points to the result within the allocated buffer */
+ void *res;
+};
+
+enum dxgkvmb_commandchanneltype {
+ DXGKVMB_VGPU_TO_HOST,
+ DXGKVMB_VM_TO_HOST,
+ DXGKVMB_HOST_TO_VM
+};
+
+/*
+ *
+ * Commands sent to the host via the guest global VM bus channel
+ * DXG_GUEST_GLOBAL_VMBUS
+ *
+ */
+
+#define DXG_VM_PROCESS_NAME_LENGTH 260
+
+enum dxgkvmb_commandtype_global {
+ DXGK_VMBCOMMAND_VM_TO_HOST_FIRST = 1000,
+ DXGK_VMBCOMMAND_CREATEPROCESS = DXGK_VMBCOMMAND_VM_TO_HOST_FIRST,
+ DXGK_VMBCOMMAND_DESTROYPROCESS = 1001,
+ DXGK_VMBCOMMAND_OPENSYNCOBJECT = 1002,
+ DXGK_VMBCOMMAND_DESTROYSYNCOBJECT = 1003,
+ DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT = 1004,
+ DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT = 1005,
+ DXGK_VMBCOMMAND_SIGNALFENCE = 1006,
+ DXGK_VMBCOMMAND_NOTIFYPROCESSFREEZE = 1007,
+ DXGK_VMBCOMMAND_NOTIFYPROCESSTHAW = 1008,
+ DXGK_VMBCOMMAND_QUERYETWSESSION = 1009,
+ DXGK_VMBCOMMAND_SETIOSPACEREGION = 1010,
+ DXGK_VMBCOMMAND_COMPLETETRANSACTION = 1011,
+ DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST = 1021,
+ DXGK_VMBCOMMAND_PRESENTVIRTUAL = 1031,
+ DXGK_VMBCOMMAND_INVALID_VM_TO_HOST
+};
+
+/*
+ *
+ * Commands sent to the host via the per-adapter VM bus channel
+ * DXG_GUEST_VGPU_VMBUS
+ *
+ */
+
+enum dxgkvmb_commandtype {
+ DXGK_VMBCOMMAND_CREATEDEVICE = 0,
+ DXGK_VMBCOMMAND_DESTROYDEVICE = 1,
+ DXGK_VMBCOMMAND_QUERYADAPTERINFO = 2,
+ DXGK_VMBCOMMAND_DDIQUERYADAPTERINFO = 3,
+ DXGK_VMBCOMMAND_CREATEALLOCATION = 4,
+ DXGK_VMBCOMMAND_DESTROYALLOCATION = 5,
+ DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL = 6,
+ DXGK_VMBCOMMAND_DESTROYCONTEXT = 7,
+ DXGK_VMBCOMMAND_CREATESYNCOBJECT = 8,
+ DXGK_VMBCOMMAND_CREATEPAGINGQUEUE = 9,
+ DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE = 10,
+ DXGK_VMBCOMMAND_MAKERESIDENT = 11,
+ DXGK_VMBCOMMAND_EVICT = 12,
+ DXGK_VMBCOMMAND_ESCAPE = 13,
+ DXGK_VMBCOMMAND_OPENADAPTER = 14,
+ DXGK_VMBCOMMAND_CLOSEADAPTER = 15,
+ DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS = 16,
+ DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS = 17,
+ DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS = 18,
+ DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS = 19,
+ DXGK_VMBCOMMAND_SUBMITCOMMAND = 20,
+ dxgk_vmbcommand_queryvideomemoryinfo = 21,
+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU = 22,
+ DXGK_VMBCOMMAND_LOCK2 = 23,
+ DXGK_VMBCOMMAND_UNLOCK2 = 24,
+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU = 25,
+ DXGK_VMBCOMMAND_SIGNALSYNCOBJECT = 26,
+ DXGK_VMBCOMMAND_SIGNALFENCENTSHAREDBYREF = 27,
+ dxgk_vmbcommand_getdevicestate = 28,
+ DXGK_VMBCOMMAND_MARKDEVICEASERROR = 29,
+ DXGK_VMBCOMMAND_ADAPTERSTOP = 30,
+ DXGK_VMBCOMMAND_SETQUEUEDLIMIT = 31,
+ DXGK_VMBCOMMAND_OPENRESOURCE = 32,
+ DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY = 33,
+ DXGK_VMBCOMMAND_PRESENTHISTORYTOKEN = 34,
+ DXGK_VMBCOMMAND_SETREDIRECTEDFLIPFENCEVALUE = 35,
+ DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO = 36,
+ DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS = 37,
+ DXGK_VMBCOMMAND_BLT = 38,
+ DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA = 39,
+ DXGK_VMBCOMMAND_CDDGDICOMMAND = 40,
+ DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY = 41,
+ DXGK_VMBCOMMAND_FLUSHDEVICE = 42,
+ DXGK_VMBCOMMAND_FLUSHADAPTER = 43,
+ DXGK_VMBCOMMAND_DDIGETNODEMETADATA = 44,
+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE = 45,
+ DXGK_VMBCOMMAND_ISSYNCOBJECTSIGNALED = 46,
+ DXGK_VMBCOMMAND_CDDSYNCGPUACCESS = 47,
+ DXGK_VMBCOMMAND_QUERYSTATISTICS = 48,
+ DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION = 49,
+ DXGK_VMBCOMMAND_CREATEHWQUEUE = 50,
+ DXGK_VMBCOMMAND_DESTROYHWQUEUE = 51,
+ DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE = 52,
+ DXGK_VMBCOMMAND_GETDRIVERSTOREFILE = 53,
+ DXGK_VMBCOMMAND_READDRIVERSTOREFILE = 54,
+ DXGK_VMBCOMMAND_GETNEXTHARDLINK = 55,
+ DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY = 56,
+ DXGK_VMBCOMMAND_OFFERALLOCATIONS = 57,
+ DXGK_VMBCOMMAND_RECLAIMALLOCATIONS = 58,
+ DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY = 59,
+ DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY = 60,
+ DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY = 61,
+ DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION = 62,
+ DXGK_VMBCOMMAND_QUERYRESOURCEINFO = 64,
+ DXGK_VMBCOMMAND_LOGEVENT = 65,
+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMPAGES = 66,
+ DXGK_VMBCOMMAND_INVALID
+};
+
+enum dxgkvmb_commandtype_host_to_vm {
+ DXGK_VMBCOMMAND_SIGNALGUESTEVENT,
+ DXGK_VMBCOMMAND_PROPAGATEPRESENTHISTORYTOKEN,
+ DXGK_VMBCOMMAND_SETGUESTDATA,
+ DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE,
+ DXGK_VMBCOMMAND_SENDWNFNOTIFICATION,
+ DXGK_VMBCOMMAND_INVALID_HOST_TO_VM
+};
+
+struct dxgkvmb_command_vm_to_host {
+ u64 command_id;
+ struct d3dkmthandle process;
+ enum dxgkvmb_commandchanneltype channel_type;
+ enum dxgkvmb_commandtype_global command_type;
+};
+
+struct dxgkvmb_command_vgpu_to_host {
+ u64 command_id;
+ struct d3dkmthandle process;
+ u32 channel_type : 8;
+ u32 async_msg : 1;
+ u32 reserved : 23;
+ enum dxgkvmb_commandtype command_type;
+};
+
+struct dxgkvmb_command_host_to_vm {
+ u64 command_id;
+ struct d3dkmthandle process;
+ u32 channel_type : 8;
+ u32 async_msg : 1;
+ u32 reserved : 23;
+ enum dxgkvmb_commandtype_host_to_vm command_type;
+};
+
+struct dxgkvmb_command_signalguestevent {
+ struct dxgkvmb_command_host_to_vm hdr;
+ u64 event;
+ u64 process_id;
+ bool dereference_event;
+};
+
+enum set_guestdata_type {
+ SETGUESTDATA_DATATYPE_DWORD = 0,
+ SETGUESTDATA_DATATYPE_UINT64 = 1
+};
+
+struct dxgkvmb_command_setguestdata {
+ struct dxgkvmb_command_host_to_vm hdr;
+ void *guest_pointer;
+ union {
+ u64 data64;
+ u32 data32;
+ };
+ u32 dereference : 1;
+ u32 data_type : 4;
+};
+
+struct dxgkvmb_command_opensyncobject {
+ struct dxgkvmb_command_vm_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle global_sync_object;
+ u32 engine_affinity;
+ struct d3dddi_synchronizationobject_flags flags;
+};
+
+struct dxgkvmb_command_opensyncobject_return {
+ struct d3dkmthandle sync_object;
+ struct ntstatus status;
+ u64 gpu_virtual_address;
+ u64 guest_cpu_physical_address;
+};
+
+/*
+ * The command returns a struct d3dkmthandle of a shared object for the
+ * given per-process object.
+ */
+struct dxgkvmb_command_createntsharedobject {
+ struct dxgkvmb_command_vm_to_host hdr;
+ struct d3dkmthandle object;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyntsharedobject {
+ struct dxgkvmb_command_vm_to_host hdr;
+ struct d3dkmthandle shared_handle;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setiospaceregion {
+ struct dxgkvmb_command_vm_to_host hdr;
+ u64 start;
+ u64 length;
+ u32 shared_page_gpadl;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setexistingsysmemstore {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle allocation;
+ u32 gpadl;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setexistingsysmempages {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle allocation;
+ u32 num_pages;
+ u32 alloc_offset_in_pages;
+ /* u64 pfn_array[num_pages] */
+};
+
+struct dxgkvmb_command_createprocess {
+ struct dxgkvmb_command_vm_to_host hdr;
+ void *process;
+ u64 process_id;
+ u16 process_name[DXG_VM_PROCESS_NAME_LENGTH + 1];
+ u8 csrss_process:1;
+ u8 dwm_process:1;
+ u8 wow64_process:1;
+ u8 linux_process:1;
+};
+
+struct dxgkvmb_command_createprocess_return {
+ struct d3dkmthandle hprocess;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyprocess {
+ struct dxgkvmb_command_vm_to_host hdr;
+};
+
+struct dxgkvmb_command_openadapter {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ u32 vmbus_interface_version;
+ u32 vmbus_last_compatible_interface_version;
+ struct winluid guest_adapter_luid;
+};
+
+struct dxgkvmb_command_openadapter_return {
+ struct d3dkmthandle host_adapter_handle;
+ struct ntstatus status;
+ u32 vmbus_interface_version;
+ u32 vmbus_last_compatible_interface_version;
+};
+
+struct dxgkvmb_command_closeadapter {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle host_handle;
+};
+
+struct dxgkvmb_command_getinternaladapterinfo {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+};
+
+struct dxgkvmb_command_getinternaladapterinfo_return {
+ struct dxgk_device_types device_types;
+ u32 driver_store_copy_mode;
+ u32 driver_ddi_version;
+ u32 secure_virtual_machine : 1;
+ u32 virtual_machine_reset : 1;
+ u32 is_vail_supported : 1;
+ u32 hw_sch_enabled : 1;
+ u32 hw_sch_capable : 1;
+ u32 va_backed_vm : 1;
+ u32 async_msg_enabled : 1;
+ u32 hw_support_state : 2;
+ u32 reserved : 23;
+ struct winluid host_adapter_luid;
+ u16 device_description[80];
+ u16 device_instance_id[WIN_MAX_PATH];
+ struct winluid host_vgpu_luid;
+};
+
+struct dxgkvmb_command_queryadapterinfo {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ enum kmtqueryadapterinfotype query_type;
+ u32 private_data_size;
+ u8 private_data[1];
+};
+
+struct dxgkvmb_command_queryadapterinfo_return {
+ struct ntstatus status;
+ u8 private_data[1];
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setallocationpriority {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+ u32 allocation_count;
+ /* struct d3dkmthandle allocations[allocation_count or 0]; */
+ /* u32 priorities[allocation_count or 1]; */
+};
+
+struct dxgkvmb_command_getallocationpriority {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+ u32 allocation_count;
+ /* struct d3dkmthandle allocations[allocation_count or 0]; */
+};
+
+struct dxgkvmb_command_getallocationpriority_return {
+ struct ntstatus status;
+ /* u32 priorities[allocation_count or 1]; */
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setcontextschedulingpriority {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle context;
+ int priority;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_setcontextschedulingpriority2 {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle context;
+ int priority;
+ bool in_process;
+};
+
+struct dxgkvmb_command_getcontextschedulingpriority {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle context;
+ bool in_process;
+};
+
+struct dxgkvmb_command_getcontextschedulingpriority_return {
+ struct ntstatus status;
+ int priority;
+};
+
+struct dxgkvmb_command_createdevice {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_createdeviceflags flags;
+ bool cdd_device;
+ void *error_code;
+};
+
+struct dxgkvmb_command_createdevice_return {
+ struct d3dkmthandle device;
+};
+
+struct dxgkvmb_command_destroydevice {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+};
+
+struct dxgkvmb_command_flushdevice {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ enum dxgdevice_flushschedulerreason reason;
+};
+
+struct dxgkvmb_command_makeresident {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle paging_queue;
+ struct d3dddi_makeresident_flags flags;
+ u32 alloc_count;
+ struct d3dkmthandle allocations[1];
+};
+
+struct dxgkvmb_command_makeresident_return {
+ u64 paging_fence_value;
+ u64 num_bytes_to_trim;
+ struct ntstatus status;
+};
+
+struct dxgkvmb_command_evict {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dddi_evict_flags flags;
+ u32 alloc_count;
+ struct d3dkmthandle allocations[1];
+};
+
+struct dxgkvmb_command_evict_return {
+ u64 num_bytes_to_trim;
+};
+
+struct dxgkvmb_command_submitcommand {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_submitcommand args;
+ /* HistoryBufferHandles */
+ /* PrivateDriverData */
+};
+
+struct dxgkvmb_command_submitcommandtohwqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_submitcommandtohwqueue args;
+ /* Written primaries */
+ /* PrivateDriverData */
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_flushheaptransitions {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+};
+
+struct dxgkvmb_command_freegpuvirtualaddress {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_freegpuvirtualaddress args;
+};
+
+struct dxgkvmb_command_mapgpuvirtualaddress {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dddi_mapgpuvirtualaddress args;
+ struct d3dkmthandle device;
+};
+
+struct dxgkvmb_command_mapgpuvirtualaddress_return {
+ u64 virtual_address;
+ u64 paging_fence_value;
+ struct ntstatus status;
+};
+
+struct dxgkvmb_command_reservegpuvirtualaddress {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dddi_reservegpuvirtualaddress args;
+};
+
+struct dxgkvmb_command_reservegpuvirtualaddress_return {
+ u64 virtual_address;
+ u64 paging_fence_value;
+ struct ntstatus status;
+};
+
+struct dxgkvmb_command_updategpuvirtualaddress {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ u64 fence_value;
+ struct d3dkmthandle device;
+ struct d3dkmthandle context;
+ struct d3dkmthandle fence_object;
+ u32 num_operations;
+ u32 flags;
+ struct d3dddi_updategpuvirtualaddress_operation operations[1];
+};
+
+struct dxgkvmb_command_queryclockcalibration {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_queryclockcalibration args;
+};
+
+struct dxgkvmb_command_queryclockcalibration_return {
+ struct ntstatus status;
+ struct dxgk_gpuclockdata clock_data;
+};
+
+struct dxgkvmb_command_createallocation_allocinfo {
+ u32 flags;
+ u32 priv_drv_data_size;
+ u32 vidpn_source_id;
+};
+
+struct dxgkvmb_command_createallocation {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+ u32 private_runtime_data_size;
+ u32 priv_drv_data_size;
+ u32 alloc_count;
+ struct d3dkmt_createallocationflags flags;
+ u64 private_runtime_resource_handle;
+ bool make_resident;
+/* dxgkvmb_command_createallocation_allocinfo alloc_info[alloc_count]; */
+/* u8 private_runtime_data[private_runtime_data_size] */
+/* u8 priv_drv_data[] for each alloc_info */
+};
+
+struct dxgkvmb_command_openresource {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ bool nt_security_sharing;
+ struct d3dkmthandle global_share;
+ u32 allocation_count;
+ u32 total_priv_drv_data_size;
+};
+
+struct dxgkvmb_command_openresource_return {
+ struct d3dkmthandle resource;
+ struct ntstatus status;
+/* struct d3dkmthandle allocation[allocation_count]; */
+};
+
+struct dxgkvmb_command_querystatistics {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_querystatistics args;
+};
+
+struct dxgkvmb_command_querystatistics_return {
+ struct ntstatus status;
+ u32 reserved;
+ struct d3dkmt_querystatistics_result result;
+};
+
+struct dxgkvmb_command_getstandardallocprivdata {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ enum d3dkmdt_standardallocationtype alloc_type;
+ u32 priv_driver_data_size;
+ u32 priv_driver_resource_size;
+ u32 physical_adapter_index;
+ union {
+ struct d3dkmdt_sharedprimarysurfacedata primary;
+ struct d3dkmdt_shadowsurfacedata shadow;
+ struct d3dkmdt_stagingsurfacedata staging;
+ struct d3dkmdt_gdisurfacedata gdi_surface;
+ };
+};
+
+struct dxgkvmb_command_getstandardallocprivdata_return {
+ struct ntstatus status;
+ u32 priv_driver_data_size;
+ u32 priv_driver_resource_size;
+ union {
+ struct d3dkmdt_sharedprimarysurfacedata primary;
+ struct d3dkmdt_shadowsurfacedata shadow;
+ struct d3dkmdt_stagingsurfacedata staging;
+ struct d3dkmdt_gdisurfacedata gdi_surface;
+ };
+/* char alloc_priv_data[priv_driver_data_size]; */
+/* char resource_priv_data[priv_driver_resource_size]; */
+};
+
+struct dxgkarg_describeallocation {
+ u64 allocation;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 multisample_method;
+ struct d3dddi_rational refresh_rate;
+ u32 private_driver_attribute;
+ u32 flags;
+ u32 rotation;
+};
+
+struct dxgkvmb_allocflags {
+ union {
+ u32 flags;
+ struct {
+ u32 primary:1;
+ u32 cdd_primary:1;
+ u32 dod_primary:1;
+ u32 overlay:1;
+ u32 reserved6:1;
+ u32 capture:1;
+ u32 reserved0:4;
+ u32 reserved1:1;
+ u32 existing_sysmem:1;
+ u32 stereo:1;
+ u32 direct_flip:1;
+ u32 hardware_protected:1;
+ u32 reserved2:1;
+ u32 reserved3:1;
+ u32 reserved4:1;
+ u32 protected:1;
+ u32 cached:1;
+ u32 independent_primary:1;
+ u32 reserved:11;
+ };
+ };
+};
+
+struct dxgkvmb_command_allocinfo_return {
+ struct d3dkmthandle allocation;
+ u32 priv_drv_data_size;
+ struct dxgkvmb_allocflags allocation_flags;
+ u64 allocation_size;
+ struct dxgkarg_describeallocation driver_info;
+};
+
+struct dxgkvmb_command_createallocation_return {
+ struct ntstatus status;
+ struct d3dkmt_createallocationflags flags;
+ struct d3dkmthandle resource;
+ struct d3dkmthandle global_share;
+ u32 vgpu_flags;
+ struct dxgkvmb_command_allocinfo_return allocation_info[1];
+ /* Private driver data for allocations */
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyallocation {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+ u32 alloc_count;
+ struct d3dddicb_destroyallocation2flags flags;
+ struct d3dkmthandle allocations[1];
+};
+
+struct dxgkvmb_command_createcontextvirtual {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle context;
+ struct d3dkmthandle device;
+ u32 node_ordinal;
+ u32 engine_affinity;
+ struct d3dddi_createcontextflags flags;
+ enum d3dkmt_clienthint client_hint;
+ u32 priv_drv_data_size;
+ u8 priv_drv_data[1];
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroycontext {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle context;
+};
+
+struct dxgkvmb_command_createpagingqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_createpagingqueue args;
+};
+
+struct dxgkvmb_command_createpagingqueue_return {
+ struct d3dkmthandle paging_queue;
+ struct d3dkmthandle sync_object;
+ u64 fence_storage_physical_address;
+ u64 fence_storage_offset;
+};
+
+struct dxgkvmb_command_destroypagingqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle paging_queue;
+};
+
+struct dxgkvmb_command_createsyncobject {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_createsynchronizationobject2 args;
+ u32 client_hint;
+};
+
+struct dxgkvmb_command_createsyncobject_return {
+ struct d3dkmthandle sync_object;
+ struct d3dkmthandle global_sync_object;
+ u64 fence_gpu_va;
+ u64 fence_storage_address;
+ u32 fence_storage_offset;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroysyncobject {
+ struct dxgkvmb_command_vm_to_host hdr;
+ struct d3dkmthandle sync_object;
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_signalsyncobject {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ u32 object_count;
+ struct d3dddicb_signalflags flags;
+ u32 context_count;
+ u64 fence_value;
+ union {
+ /* Pointer to the guest event object */
+ u64 cpu_event_handle;
+ /* Non zero when signal from CPU is done */
+ struct d3dkmthandle device;
+ };
+ /* struct d3dkmthandle ObjectHandleArray[object_count] */
+ /* struct d3dkmthandle ContextArray[context_count] */
+ /* u64 MonitoredFenceValueArray[object_count] */
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_waitforsyncobjectfromcpu {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ u32 object_count;
+ struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags;
+ u64 guest_event_pointer;
+ bool dereference_event;
+ /* struct d3dkmthandle ObjectHandleArray[object_count] */
+ /* u64 FenceValueArray [object_count] */
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_waitforsyncobjectfromgpu {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle context;
+	/* Must be 1 when legacy_fence_object is true */
+ u32 object_count;
+ bool legacy_fence_object;
+ u64 fence_values[1];
+ /* struct d3dkmthandle ObjectHandles[object_count] */
+};
+
+struct dxgkvmb_command_lock2 {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_lock2 args;
+ bool use_legacy_lock;
+ u32 flags;
+ u32 priv_drv_data;
+};
+
+struct dxgkvmb_command_lock2_return {
+ struct ntstatus status;
+ void *cpu_visible_buffer_offset;
+};
+
+struct dxgkvmb_command_unlock2 {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_unlock2 args;
+ bool use_legacy_unlock;
+};
+
+struct dxgkvmb_command_updateallocationproperty {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dddi_updateallocproperty args;
+};
+
+struct dxgkvmb_command_updateallocationproperty_return {
+ u64 paging_fence_value;
+ struct ntstatus status;
+};
+
+struct dxgkvmb_command_markdeviceaserror {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_markdeviceaserror args;
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_offerallocations {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ u32 allocation_count;
+ enum d3dkmt_offer_priority priority;
+ struct d3dkmt_offer_flags flags;
+ bool resources;
+ struct d3dkmthandle allocations[1];
+};
+
+struct dxgkvmb_command_reclaimallocations {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle paging_queue;
+ u32 allocation_count;
+ bool resources;
+ bool write_results;
+ struct d3dkmthandle allocations[1];
+};
+
+struct dxgkvmb_command_reclaimallocations_return {
+ u64 paging_fence_value;
+ struct ntstatus status;
+ enum d3dddi_reclaim_result discarded[1];
+};
+
+/* Returns ntstatus */
+struct dxgkvmb_command_changevideomemoryreservation {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_changevideomemoryreservation args;
+};
+
+/* Returns the same structure */
+struct dxgkvmb_command_createhwqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct ntstatus status;
+ struct d3dkmthandle hwqueue;
+ struct d3dkmthandle hwqueue_progress_fence;
+ void *hwqueue_progress_fence_cpuva;
+ u64 hwqueue_progress_fence_gpuva;
+ struct d3dkmthandle context;
+ struct d3dddi_createhwqueueflags flags;
+ u32 priv_drv_data_size;
+ char priv_drv_data[1];
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyhwqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle hwqueue;
+};
+
+struct dxgkvmb_command_queryallocationresidency {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_queryallocationresidency args;
+ /* struct d3dkmthandle allocations[0 or number of allocations] */
+};
+
+struct dxgkvmb_command_queryallocationresidency_return {
+ struct ntstatus status;
+ /* d3dkmt_allocationresidencystatus[NumAllocations] */
+};
+
+/* Returns only private data */
+struct dxgkvmb_command_escape {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle adapter;
+ struct d3dkmthandle device;
+ enum d3dkmt_escapetype type;
+ struct d3dddi_escapeflags flags;
+ u32 priv_drv_data_size;
+ struct d3dkmthandle context;
+ u8 priv_drv_data[1];
+};
+
+struct dxgkvmb_command_queryvideomemoryinfo {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle adapter;
+ enum d3dkmt_memory_segment_group memory_segment_group;
+ u32 physical_adapter_index;
+};
+
+struct dxgkvmb_command_queryvideomemoryinfo_return {
+ u64 budget;
+ u64 current_usage;
+ u64 current_reservation;
+ u64 available_for_reservation;
+};
+
+struct dxgkvmb_command_getdevicestate {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmt_getdevicestate args;
+};
+
+struct dxgkvmb_command_getdevicestate_return {
+ struct d3dkmt_getdevicestate args;
+ struct ntstatus status;
+};
+
+struct dxgkvmb_command_shareobjectwithhost {
+ struct dxgkvmb_command_vm_to_host hdr;
+ struct d3dkmthandle device_handle;
+ struct d3dkmthandle object_handle;
+ u64 reserved;
+};
+
+struct dxgkvmb_command_shareobjectwithhost_return {
+ struct ntstatus status;
+ u32 alignment;
+ u64 vail_nt_handle;
+};
+
+struct dxgkvmb_command_presentvirtual {
+ struct dxgkvmb_command_vm_to_host hdr;
+ u64 acquire_semaphore_nthandle;
+ u64 release_semaphore_nthandle;
+ u64 composition_memory_nthandle;
+ u64 private_data_size;
+	/* private_data */
+};
+
+int
+dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel,
+ void *command, u32 command_size, void *result,
+ u32 result_size);
+
+#endif /* _DXGVMBUS_H */
diff --git a/virtio_dxgkrnl/hmgr.c b/virtio_dxgkrnl/hmgr.c
new file mode 100644
index 0000000..c1ff0ca
--- /dev/null
+++ b/virtio_dxgkrnl/hmgr.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Handle manager implementation
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+
+#include "misc.h"
+#include "dxgkrnl.h"
+#include "hmgr.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+
+const struct d3dkmthandle zerohandle;
+
+/*
+ * Handle parameters
+ */
+#define HMGRHANDLE_INSTANCE_BITS 6
+#define HMGRHANDLE_INDEX_BITS 24
+#define HMGRHANDLE_UNIQUE_BITS 2
+
+#define HMGRHANDLE_INSTANCE_SHIFT 0
+#define HMGRHANDLE_INDEX_SHIFT \
+ (HMGRHANDLE_INSTANCE_BITS + HMGRHANDLE_INSTANCE_SHIFT)
+#define HMGRHANDLE_UNIQUE_SHIFT \
+ (HMGRHANDLE_INDEX_BITS + HMGRHANDLE_INDEX_SHIFT)
+
+#define HMGRHANDLE_INSTANCE_MASK \
+ (((1 << HMGRHANDLE_INSTANCE_BITS) - 1) << HMGRHANDLE_INSTANCE_SHIFT)
+#define HMGRHANDLE_INDEX_MASK \
+ (((1 << HMGRHANDLE_INDEX_BITS) - 1) << HMGRHANDLE_INDEX_SHIFT)
+#define HMGRHANDLE_UNIQUE_MASK \
+ (((1 << HMGRHANDLE_UNIQUE_BITS) - 1) << HMGRHANDLE_UNIQUE_SHIFT)
+
+#define HMGRHANDLE_INSTANCE_MAX ((1 << HMGRHANDLE_INSTANCE_BITS) - 1)
+#define HMGRHANDLE_INDEX_MAX ((1 << HMGRHANDLE_INDEX_BITS) - 1)
+#define HMGRHANDLE_UNIQUE_MAX ((1 << HMGRHANDLE_UNIQUE_BITS) - 1)
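+
+/*
+ * A d3dkmthandle packs three fields: bits 0-5 hold the instance, bits 6-29
+ * the index into the entry table and bits 30-31 a small "unique" counter,
+ * used to detect stale handles (see build_handle() below).
+ */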
+
+/*
+ * Handle entry
+ */
+struct hmgrentry {
+ union {
+ void *object;
+ struct {
+ u32 prev_free_index;
+ u32 next_free_index;
+ };
+ };
+ u32 type:HMGRENTRY_TYPE_BITS + 1;
+ u32 unique:HMGRHANDLE_UNIQUE_BITS;
+ u32 instance:HMGRHANDLE_INSTANCE_BITS;
+ u32 destroyed:1;
+};
+
+#define HMGRTABLE_SIZE_INCREMENT 1024
+#define HMGRTABLE_MIN_FREE_ENTRIES 128
+#define HMGRTABLE_INVALID_INDEX (~((1 << HMGRHANDLE_INDEX_BITS) - 1))
+#define HMGRTABLE_SIZE_MAX 0xFFFFFFF
+
+static u32 table_size_increment = HMGRTABLE_SIZE_INCREMENT;
+
+static u32 get_unique(struct d3dkmthandle h)
+{
+ return (h.v & HMGRHANDLE_UNIQUE_MASK) >> HMGRHANDLE_UNIQUE_SHIFT;
+}
+
+static u32 get_index(struct d3dkmthandle h)
+{
+ return (h.v & HMGRHANDLE_INDEX_MASK) >> HMGRHANDLE_INDEX_SHIFT;
+}
+
+u32 get_instance(struct d3dkmthandle h)
+{
+ return (h.v & HMGRHANDLE_INSTANCE_MASK) >> HMGRHANDLE_INSTANCE_SHIFT;
+}
+
+static bool is_handle_valid(struct hmgrtable *table, struct d3dkmthandle h,
+ bool ignore_destroyed, enum hmgrentry_type t,
+ bool error_on_invalid_index)
+{
+ u32 index = get_index(h);
+ u32 unique = get_unique(h);
+ struct hmgrentry *entry;
+
+ if (index >= table->table_size) {
+ if (error_on_invalid_index)
+ pr_err("%s Invalid index %x %d\n", __func__, h.v, index);
+ return false;
+ }
+
+ entry = &table->entry_table[index];
+ if (unique != entry->unique) {
+ if (error_on_invalid_index)
+ pr_err("%s Invalid unique %x %d %d %d %p",
+ __func__, h.v, unique, entry->unique,
+ index, entry->object);
+ return false;
+ }
+
+ if (entry->destroyed && !ignore_destroyed) {
+ if (error_on_invalid_index)
+ pr_err("%s Invalid destroyed", __func__);
+ return false;
+ }
+
+ if (entry->type == HMGRENTRY_TYPE_FREE) {
+ if (error_on_invalid_index)
+ pr_err("%s Entry is freed %x %d", __func__, h.v, index);
+ return false;
+ }
+
+ if (t != HMGRENTRY_TYPE_FREE && t != entry->type) {
+ if (error_on_invalid_index)
+ pr_err("%s type mismatch %x %d %d", __func__, h.v,
+ t, entry->type);
+ return false;
+ }
+
+ return true;
+}
+
+static struct d3dkmthandle build_handle(u32 index, u32 unique, u32 instance)
+{
+ struct d3dkmthandle handle;
+
+ handle.v = (index << HMGRHANDLE_INDEX_SHIFT) & HMGRHANDLE_INDEX_MASK;
+ handle.v |= (unique << HMGRHANDLE_UNIQUE_SHIFT) &
+ HMGRHANDLE_UNIQUE_MASK;
+ handle.v |= (instance << HMGRHANDLE_INSTANCE_SHIFT) &
+ HMGRHANDLE_INSTANCE_MASK;
+
+ return handle;
+}
+
+inline u32 hmgrtable_get_used_entry_count(struct hmgrtable *table)
+{
+ DXGKRNL_ASSERT(table->table_size >= table->free_count);
+ return (table->table_size - table->free_count);
+}
+
+bool hmgrtable_mark_destroyed(struct hmgrtable *table, struct d3dkmthandle h)
+{
+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE, true))
+ return false;
+
+ table->entry_table[get_index(h)].destroyed = true;
+ return true;
+}
+
+bool hmgrtable_unmark_destroyed(struct hmgrtable *table, struct d3dkmthandle h)
+{
+ if (!is_handle_valid(table, h, true, HMGRENTRY_TYPE_FREE, true))
+ return true;
+
+ DXGKRNL_ASSERT(table->entry_table[get_index(h)].destroyed);
+ table->entry_table[get_index(h)].destroyed = 0;
+ return true;
+}
+
+void print_status(struct hmgrtable *table)
+{
+ int i;
+
+ dev_dbg(dxgglobaldev, "hmgrtable head, tail %p %d %d\n",
+ table, table->free_handle_list_head,
+ table->free_handle_list_tail);
+ if (table->entry_table == NULL)
+ return;
+ for (i = 0; i < 3; i++) {
+ if (table->entry_table[i].type != HMGRENTRY_TYPE_FREE)
+ dev_dbg(dxgglobaldev, "hmgrtable entry %p %d %p\n",
+ table, i, table->entry_table[i].object);
+ else
+ dev_dbg(dxgglobaldev, "hmgrtable entry %p %d %d %d\n",
+ table, i,
+ table->entry_table[i].next_free_index,
+ table->entry_table[i].prev_free_index);
+ }
+}
+
+static bool expand_table(struct hmgrtable *table, u32 NumEntries)
+{
+ u32 new_table_size;
+ struct hmgrentry *new_entry;
+ u32 table_index;
+ u32 new_free_count;
+ u32 prev_free_index;
+ u32 tail_index = table->free_handle_list_tail;
+
+ /* The tail should point to the last free element in the list */
+ if (table->free_count != 0) {
+ if (tail_index >= table->table_size ||
+ table->entry_table[tail_index].next_free_index !=
+ HMGRTABLE_INVALID_INDEX) {
+ pr_err("%s:corruption\n", __func__);
+ pr_err("tail_index: %x", tail_index);
+ pr_err("table size: %x", table->table_size);
+ pr_err("free_count: %d", table->free_count);
+ pr_err("NumEntries: %x", NumEntries);
+ return false;
+ }
+ }
+
+ new_free_count = table_size_increment + table->free_count;
+ new_table_size = table->table_size + table_size_increment;
+ if (new_table_size < NumEntries) {
+ new_free_count += NumEntries - new_table_size;
+ new_table_size = NumEntries;
+ }
+
+ if (new_table_size > HMGRHANDLE_INDEX_MAX) {
+ pr_err("%s:corruption\n", __func__);
+ return false;
+ }
+
+ new_entry = (struct hmgrentry *)
+ vzalloc(new_table_size * sizeof(struct hmgrentry));
+ if (new_entry == NULL) {
+ pr_err("%s:allocation failed\n", __func__);
+ return false;
+ }
+
+ if (table->entry_table) {
+ memcpy(new_entry, table->entry_table,
+ table->table_size * sizeof(struct hmgrentry));
+ vfree(table->entry_table);
+ } else {
+ table->free_handle_list_head = 0;
+ }
+
+ table->entry_table = new_entry;
+
+ /* Initialize new table entries and add to the free list */
+ table_index = table->table_size;
+
+ prev_free_index = table->free_handle_list_tail;
+
+ while (table_index < new_table_size) {
+ struct hmgrentry *entry = &table->entry_table[table_index];
+
+ entry->prev_free_index = prev_free_index;
+ entry->next_free_index = table_index + 1;
+ entry->type = HMGRENTRY_TYPE_FREE;
+ entry->unique = 1;
+ entry->instance = 0;
+ prev_free_index = table_index;
+
+ table_index++;
+ }
+
+ table->entry_table[table_index - 1].next_free_index =
+ (u32) HMGRTABLE_INVALID_INDEX;
+
+ if (table->free_count != 0) {
+ /* Link the current free list with the new entries */
+ struct hmgrentry *entry;
+
+ entry = &table->entry_table[table->free_handle_list_tail];
+ entry->next_free_index = table->table_size;
+ }
+ table->free_handle_list_tail = new_table_size - 1;
+ if (table->free_handle_list_head == HMGRTABLE_INVALID_INDEX)
+ table->free_handle_list_head = table->table_size;
+
+ table->table_size = new_table_size;
+ table->free_count = new_free_count;
+
+ return true;
+}
+
+void hmgrtable_init(struct hmgrtable *table, struct dxgprocess *process)
+{
+ table->process = process;
+ table->entry_table = NULL;
+ table->table_size = 0;
+ table->free_handle_list_head = HMGRTABLE_INVALID_INDEX;
+ table->free_handle_list_tail = HMGRTABLE_INVALID_INDEX;
+ table->free_count = 0;
+ init_rwsem(&table->table_lock);
+}
+
+void hmgrtable_destroy(struct hmgrtable *table)
+{
+ if (table->entry_table) {
+ vfree(table->entry_table);
+ table->entry_table = NULL;
+ }
+}
+
+void hmgrtable_lock(struct hmgrtable *table, enum dxglockstate state)
+{
+ if (state == DXGLOCK_EXCL)
+ down_write(&table->table_lock);
+ else
+ down_read(&table->table_lock);
+}
+
+void hmgrtable_unlock(struct hmgrtable *table, enum dxglockstate state)
+{
+ if (state == DXGLOCK_EXCL)
+ up_write(&table->table_lock);
+ else
+ up_read(&table->table_lock);
+}
+
+struct d3dkmthandle hmgrtable_alloc_handle(struct hmgrtable *table,
+ void *object,
+ enum hmgrentry_type type,
+ bool make_valid)
+{
+ u32 index;
+ struct hmgrentry *entry;
+ u32 unique;
+
+ DXGKRNL_ASSERT(type <= HMGRENTRY_TYPE_LIMIT);
+ DXGKRNL_ASSERT(type > HMGRENTRY_TYPE_FREE);
+
+ if (table->free_count <= HMGRTABLE_MIN_FREE_ENTRIES) {
+ if (!expand_table(table, 0)) {
+ pr_err("hmgrtable expand_table failed\n");
+ return zerohandle;
+ }
+ }
+
+ if (table->free_handle_list_head >= table->table_size) {
+ pr_err("hmgrtable corrupted handle table head\n");
+ return zerohandle;
+ }
+
+ index = table->free_handle_list_head;
+ entry = &table->entry_table[index];
+
+ if (entry->type != HMGRENTRY_TYPE_FREE) {
+ pr_err("hmgrtable expected free handle\n");
+ return zerohandle;
+ }
+
+ table->free_handle_list_head = entry->next_free_index;
+
+ if (entry->next_free_index != table->free_handle_list_tail) {
+ if (entry->next_free_index >= table->table_size) {
+ pr_err("hmgrtable invalid next free index\n");
+ return zerohandle;
+ }
+ table->entry_table[entry->next_free_index].prev_free_index =
+ HMGRTABLE_INVALID_INDEX;
+ }
+
+ unique = table->entry_table[index].unique;
+
+ table->entry_table[index].object = object;
+ table->entry_table[index].type = type;
+ table->entry_table[index].instance = 0;
+ table->entry_table[index].destroyed = !make_valid;
+ table->free_count--;
+ DXGKRNL_ASSERT(table->free_count <= table->table_size);
+
+ return build_handle(index, unique, table->entry_table[index].instance);
+}
+
+int hmgrtable_assign_handle_safe(struct hmgrtable *table,
+ void *object,
+ enum hmgrentry_type type,
+ struct d3dkmthandle h)
+{
+ int ret;
+
+ hmgrtable_lock(table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(table, object, type, h);
+ hmgrtable_unlock(table, DXGLOCK_EXCL);
+ return ret;
+}
+
+int hmgrtable_assign_handle(struct hmgrtable *table, void *object,
+ enum hmgrentry_type type, struct d3dkmthandle h)
+{
+ u32 index = get_index(h);
+ u32 unique = get_unique(h);
+ struct hmgrentry *entry = NULL;
+
+ dev_dbg(dxgglobaldev, "%s %x, %d %p, %p\n",
+ __func__, h.v, index, object, table);
+
+ if (index >= HMGRHANDLE_INDEX_MAX) {
+ pr_err("handle index is too big: %x %d", h.v, index);
+ return -EINVAL;
+ }
+
+ if (index >= table->table_size) {
+ u32 new_size = index + table_size_increment;
+
+ if (new_size > HMGRHANDLE_INDEX_MAX)
+ new_size = HMGRHANDLE_INDEX_MAX;
+ if (!expand_table(table, new_size)) {
+ pr_err("failed to expand handle table %d", new_size);
+ return -ENOMEM;
+ }
+ }
+
+ entry = &table->entry_table[index];
+
+ if (entry->type != HMGRENTRY_TYPE_FREE) {
+ pr_err("the entry is already busy: %d %x",
+ entry->type,
+ hmgrtable_build_entry_handle(table, index).v);
+ return -EINVAL;
+ }
+
+ if (index != table->free_handle_list_tail) {
+ if (entry->next_free_index >= table->table_size) {
+ pr_err("hmgr: invalid next free index %d\n",
+ entry->next_free_index);
+ return -EINVAL;
+ }
+ table->entry_table[entry->next_free_index].prev_free_index =
+ entry->prev_free_index;
+ } else {
+ table->free_handle_list_tail = entry->prev_free_index;
+ }
+
+ if (index != table->free_handle_list_head) {
+ if (entry->prev_free_index >= table->table_size) {
+ pr_err("hmgr: invalid next prev index %d\n",
+ entry->prev_free_index);
+ return -EINVAL;
+ }
+ table->entry_table[entry->prev_free_index].next_free_index =
+ entry->next_free_index;
+ } else {
+ table->free_handle_list_head = entry->next_free_index;
+ }
+
+ entry->prev_free_index = HMGRTABLE_INVALID_INDEX;
+ entry->next_free_index = HMGRTABLE_INVALID_INDEX;
+ entry->object = object;
+ entry->type = type;
+ entry->instance = 0;
+ entry->unique = unique;
+ entry->destroyed = false;
+
+ table->free_count--;
+ DXGKRNL_ASSERT(table->free_count <= table->table_size);
+ return 0;
+}
+
+struct d3dkmthandle hmgrtable_alloc_handle_safe(struct hmgrtable *table,
+ void *obj,
+ enum hmgrentry_type type,
+ bool make_valid)
+{
+ struct d3dkmthandle h;
+
+ hmgrtable_lock(table, DXGLOCK_EXCL);
+ h = hmgrtable_alloc_handle(table, obj, type, make_valid);
+ hmgrtable_unlock(table, DXGLOCK_EXCL);
+ return h;
+}
+
+void hmgrtable_free_handle(struct hmgrtable *table, enum hmgrentry_type t,
+ struct d3dkmthandle h)
+{
+ struct hmgrentry *entry;
+ u32 i = get_index(h);
+
+ dev_dbg(dxgglobaldev, "%s: %p %x\n", __func__, table, h.v);
+
+ /* Ignore the destroyed flag when checking the handle */
+ if (is_handle_valid(table, h, true, t, true)) {
+ DXGKRNL_ASSERT(table->free_count < table->table_size);
+ entry = &table->entry_table[i];
+ entry->type = HMGRENTRY_TYPE_FREE;
+ entry->destroyed = 0;
+ if (entry->unique != HMGRHANDLE_UNIQUE_MAX)
+ entry->unique += 1;
+ else
+ entry->unique = 1;
+
+ table->free_count++;
+ DXGKRNL_ASSERT(table->free_count <= table->table_size);
+
+ /*
+ * Insert the index to the free list at the tail.
+ */
+ entry->next_free_index = HMGRTABLE_INVALID_INDEX;
+ entry->prev_free_index = table->free_handle_list_tail;
+ entry = &table->entry_table[table->free_handle_list_tail];
+ entry->next_free_index = i;
+ table->free_handle_list_tail = i;
+ } else {
+ pr_err("%s:error: %d %x\n", __func__, i, h.v);
+ }
+}
+
+void hmgrtable_free_handle_safe(struct hmgrtable *table, enum hmgrentry_type t,
+ struct d3dkmthandle h)
+{
+ hmgrtable_lock(table, DXGLOCK_EXCL);
+ hmgrtable_free_handle(table, t, h);
+ hmgrtable_unlock(table, DXGLOCK_EXCL);
+}
+
+struct d3dkmthandle hmgrtable_build_entry_handle(struct hmgrtable *table,
+ u32 index)
+{
+ DXGKRNL_ASSERT(index < table->table_size);
+
+ return build_handle(index, table->entry_table[index].unique,
+ table->entry_table[index].instance);
+}
+
+void *hmgrtable_get_object(struct hmgrtable *table, struct d3dkmthandle h)
+{
+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE, true))
+ return NULL;
+
+ return table->entry_table[get_index(h)].object;
+}
+
+void *hmgrtable_get_object_by_type(struct hmgrtable *table,
+ enum hmgrentry_type type,
+ struct d3dkmthandle h,
+ bool error_on_missing)
+{
+ if (!is_handle_valid(table, h, false, type, error_on_missing)) {
+ if (error_on_missing)
+ pr_err("%s invalid handle %x\n", __func__, h.v);
+
+ return NULL;
+ }
+ return table->entry_table[get_index(h)].object;
+}
+
+void *hmgrtable_get_entry_object(struct hmgrtable *table, u32 index)
+{
+ DXGKRNL_ASSERT(index < table->table_size);
+ DXGKRNL_ASSERT(table->entry_table[index].type != HMGRENTRY_TYPE_FREE);
+
+ return table->entry_table[index].object;
+}
+
+enum hmgrentry_type hmgrtable_get_entry_type(struct hmgrtable *table,
+ u32 index)
+{
+ DXGKRNL_ASSERT(index < table->table_size);
+ return (enum hmgrentry_type)table->entry_table[index].type;
+}
+
+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *table,
+ struct d3dkmthandle h)
+{
+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE, true))
+ return HMGRENTRY_TYPE_FREE;
+
+ return hmgrtable_get_entry_type(table, get_index(h));
+}
+
+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *table,
+ struct d3dkmthandle h,
+ enum hmgrentry_type type)
+{
+ if (!is_handle_valid(table, h, true, type, true))
+ return NULL;
+ return table->entry_table[get_index(h)].object;
+}
+
+bool hmgrtable_next_entry(struct hmgrtable *tbl,
+ u32 *index,
+ enum hmgrentry_type *type,
+ struct d3dkmthandle *handle,
+ void **object)
+{
+ u32 i;
+ struct hmgrentry *entry;
+
+ for (i = *index; i < tbl->table_size; i++) {
+ entry = &tbl->entry_table[i];
+ if (entry->type != HMGRENTRY_TYPE_FREE) {
+ *index = i + 1;
+ *object = entry->object;
+ *handle = build_handle(i, entry->unique,
+ entry->instance);
+ *type = entry->type;
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/virtio_dxgkrnl/hmgr.h b/virtio_dxgkrnl/hmgr.h
new file mode 100644
index 0000000..88dee58
--- /dev/null
+++ b/virtio_dxgkrnl/hmgr.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Handle manager definitions
+ *
+ */
+
+#ifndef _HMGR_H_
+#define _HMGR_H_
+
+#include "misc.h"
+
+struct hmgrentry;
+
+/*
+ * Handle manager table.
+ *
+ * Implementation notes:
+ * A list of free handles is built on top of the array of table entries.
+ * free_handle_list_head is the index of the first entry in the list.
+ * free_handle_list_tail is the index of an entry in the list, which is
+ * HMGRTABLE_MIN_FREE_ENTRIES entries away from the head. This means that a
+ * freed handle can be re-used only after HMGRTABLE_MIN_FREE_ENTRIES more
+ * handles have been allocated.
+ * Handles are allocated from the start of the list and free handles are
+ * inserted after the tail of the list.
+ *
+ */
+struct hmgrtable {
+ struct dxgprocess *process;
+ struct hmgrentry *entry_table;
+ u32 free_handle_list_head;
+ u32 free_handle_list_tail;
+ u32 table_size;
+ u32 free_count;
+ struct rw_semaphore table_lock;
+};
+
+/*
+ * Handle entry data types.
+ */
+#define HMGRENTRY_TYPE_BITS 5
+
+enum hmgrentry_type {
+ HMGRENTRY_TYPE_FREE = 0,
+ HMGRENTRY_TYPE_DXGADAPTER = 1,
+ HMGRENTRY_TYPE_DXGSHAREDRESOURCE = 2,
+ HMGRENTRY_TYPE_DXGDEVICE = 3,
+ HMGRENTRY_TYPE_DXGRESOURCE = 4,
+ HMGRENTRY_TYPE_DXGALLOCATION = 5,
+ HMGRENTRY_TYPE_DXGOVERLAY = 6,
+ HMGRENTRY_TYPE_DXGCONTEXT = 7,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT = 8,
+ HMGRENTRY_TYPE_DXGKEYEDMUTEX = 9,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE = 10,
+ HMGRENTRY_TYPE_DXGDEVICESYNCOBJECT = 11,
+ HMGRENTRY_TYPE_DXGPROCESS = 12,
+ HMGRENTRY_TYPE_DXGSHAREDVMOBJECT = 13,
+ HMGRENTRY_TYPE_DXGPROTECTEDSESSION = 14,
+ HMGRENTRY_TYPE_DXGHWQUEUE = 15,
+ HMGRENTRY_TYPE_DXGREMOTEBUNDLEOBJECT = 16,
+ HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEOBJECT = 17,
+ HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEPROXY = 18,
+ HMGRENTRY_TYPE_DXGTRACKEDWORKLOAD = 19,
+ HMGRENTRY_TYPE_LIMIT = ((1 << HMGRENTRY_TYPE_BITS) - 1),
+ HMGRENTRY_TYPE_MONITOREDFENCE = HMGRENTRY_TYPE_LIMIT + 1,
+};
+
+void hmgrtable_init(struct hmgrtable *tbl, struct dxgprocess *process);
+void hmgrtable_destroy(struct hmgrtable *tbl);
+void hmgrtable_lock(struct hmgrtable *tbl, enum dxglockstate state);
+void hmgrtable_unlock(struct hmgrtable *tbl, enum dxglockstate state);
+struct d3dkmthandle hmgrtable_alloc_handle(struct hmgrtable *tbl, void *object,
+ enum hmgrentry_type t, bool make_valid);
+struct d3dkmthandle hmgrtable_alloc_handle_safe(struct hmgrtable *tbl,
+ void *obj,
+ enum hmgrentry_type t,
+ bool reserve);
+int hmgrtable_assign_handle(struct hmgrtable *tbl, void *obj,
+ enum hmgrentry_type, struct d3dkmthandle h);
+int hmgrtable_assign_handle_safe(struct hmgrtable *tbl, void *obj,
+ enum hmgrentry_type t, struct d3dkmthandle h);
+void hmgrtable_free_handle(struct hmgrtable *tbl, enum hmgrentry_type t,
+ struct d3dkmthandle h);
+void hmgrtable_free_handle_safe(struct hmgrtable *tbl, enum hmgrentry_type t,
+ struct d3dkmthandle h);
+struct d3dkmthandle hmgrtable_build_entry_handle(struct hmgrtable *tbl,
+ u32 index);
+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *tbl,
+ struct d3dkmthandle h);
+void *hmgrtable_get_object(struct hmgrtable *tbl, struct d3dkmthandle h);
+void *hmgrtable_get_object_by_type(struct hmgrtable *tbl, enum hmgrentry_type t,
+ struct d3dkmthandle h, bool error_on_missing);
+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *tbl,
+ struct d3dkmthandle h,
+ enum hmgrentry_type t);
+bool hmgrtable_mark_destroyed(struct hmgrtable *tbl, struct d3dkmthandle h);
+bool hmgrtable_unmark_destroyed(struct hmgrtable *tbl, struct d3dkmthandle h);
+void *hmgrtable_get_entry_object(struct hmgrtable *tbl, u32 index);
+bool hmgrtable_next_entry(struct hmgrtable *tbl,
+ u32 *start_index,
+ enum hmgrentry_type *type,
+ struct d3dkmthandle *handle,
+ void **object);
+
+#endif
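A small usage sketch (editorial, not part of the patch) of the API declared above, illustrating the delayed handle re-use described in the implementation notes; the exclusive lock is taken around the lookup only to keep the example short.

/* Illustrative only: life cycle of one handle-table entry. */
static void example_handle_lifecycle(struct dxgprocess *process, void *object)
{
	struct hmgrtable table;
	struct d3dkmthandle h;
	void *found;

	hmgrtable_init(&table, process);

	h = hmgrtable_alloc_handle_safe(&table, object,
					HMGRENTRY_TYPE_DXGALLOCATION, true);
	if (h.v == 0)		/* zerohandle means the allocation failed */
		goto out;

	/* Look the object up again by handle and expected entry type. */
	hmgrtable_lock(&table, DXGLOCK_EXCL);
	found = hmgrtable_get_object_by_type(&table,
					     HMGRENTRY_TYPE_DXGALLOCATION,
					     h, true);
	hmgrtable_unlock(&table, DXGLOCK_EXCL);
	DXGKRNL_ASSERT(found == object);

	/*
	 * The freed index is appended to the tail of the free list, so this
	 * handle value is not handed out again until at least
	 * HMGRTABLE_MIN_FREE_ENTRIES further allocations have been made.
	 */
	hmgrtable_free_handle_safe(&table, HMGRENTRY_TYPE_DXGALLOCATION, h);
out:
	hmgrtable_destroy(&table);
}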
diff --git a/virtio_dxgkrnl/include/uapi/d3dkmthk.h b/virtio_dxgkrnl/include/uapi/d3dkmthk.h
new file mode 100644
index 0000000..9f280c6
--- /dev/null
+++ b/virtio_dxgkrnl/include/uapi/d3dkmthk.h
@@ -0,0 +1,1972 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * User mode WDDM interface definitions
+ *
+ */
+
+#ifndef _D3DKMTHK_H
+#define _D3DKMTHK_H
+
+/*
+ * This structure matches the definition of D3DKMTHANDLE in Windows.
+ * The handle is opaque in user mode. It is used by user-mode applications
+ * to represent kernel-mode objects created by dxgkrnl.
+ */
+struct d3dkmthandle {
+ union {
+ struct {
+ __u32 instance : 6;
+ __u32 index : 24;
+ __u32 unique : 2;
+ };
+ __u32 v;
+ };
+};
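/*
 * Editorial sketch, not part of the patch: the bit-field above mirrors the
 * HMGRHANDLE_* layout in hmgr.c (instance in bits 0-5, index in bits 6-29,
 * unique in bits 30-31), so a handle can equivalently be decomposed with
 * shifts and masks.
 */
static inline __u32 example_handle_index(struct d3dkmthandle h)
{
	return (h.v >> 6) & 0xFFFFFF;	/* the 24-bit index field */
}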
+
+/*
+ * VM bus messages return Windows' NTSTATUS, which is an integer where only
+ * a negative value indicates a failure. A non-negative value is a success
+ * and is returned to user mode as the IOCTL return code. Negative status
+ * codes are converted to Linux error codes.
+ */
+struct ntstatus {
+ union {
+ struct {
+ int code : 16;
+ int facility : 13;
+ int customer : 1;
+ int severity : 2;
+ };
+ int v;
+ };
+};
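/*
 * Editorial sketch, not part of the patch: as the comment above says, a
 * negative NTSTATUS is a failure that the driver maps to a Linux error
 * code, while a non-negative value is passed back as the ioctl result.
 * The real mapping is driver-internal; -EINVAL is only a placeholder.
 */
static inline int example_ntstatus_to_ret(struct ntstatus status)
{
	return status.v < 0 ? -EINVAL : status.v;
}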
+
+/* Matches Windows LUID definition */
+struct winluid {
+ __u32 a;
+ __u32 b;
+};
+
+struct large_integer {
+ union {
+ struct {
+ unsigned long lowpart;
+ long highpart;
+ } u;
+ int64_t quadpart;
+ };
+};
+
+#define D3DDDI_MAX_WRITTEN_PRIMARIES 16
+#define D3DDDI_MAX_MPO_PRESENT_DIRTY_RECTS 0xFFF
+
+#define D3DKMT_CREATEALLOCATION_MAX 1024
+#define D3DKMT_MAKERESIDENT_ALLOC_MAX (1024 * 10)
+#define D3DKMT_ADAPTERS_MAX 64
+#define D3DDDI_MAX_BROADCAST_CONTEXT 64
+#define D3DDDI_MAX_OBJECT_WAITED_ON 32
+#define D3DDDI_MAX_OBJECT_SIGNALED 32
+
+struct d3dkmt_adapterinfo {
+ struct d3dkmthandle adapter_handle;
+ struct winluid adapter_luid;
+ __u32 num_sources;
+ __u32 present_move_regions_preferred;
+};
+
+struct d3dkmt_enumadapters2 {
+ __u32 num_adapters;
+ __u32 reserved;
+#ifdef __KERNEL__
+ struct d3dkmt_adapterinfo *adapters;
+#else
+ __u64 *adapters;
+#endif
+};
+
+struct d3dkmt_closeadapter {
+ struct d3dkmthandle adapter_handle;
+};
+
+struct d3dkmt_openadapterfromluid {
+ struct winluid adapter_luid;
+ struct d3dkmthandle adapter_handle;
+};
+
+struct d3dddi_allocationlist {
+ struct d3dkmthandle allocation;
+ union {
+ struct {
+ __u32 write_operation :1;
+ __u32 do_not_retire_instance :1;
+ __u32 offer_priority :3;
+ __u32 reserved :27;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dddi_patchlocationlist {
+ __u32 allocation_index;
+ union {
+ struct {
+ __u32 slot_id:24;
+ __u32 reserved:8;
+ };
+ __u32 value;
+ };
+ __u32 driver_id;
+ __u32 allocation_offset;
+ __u32 patch_offset;
+ __u32 split_offset;
+};
+
+struct d3dkmt_createdeviceflags {
+ __u32 legacy_mode:1;
+ __u32 request_vSync:1;
+ __u32 disable_gpu_timeout:1;
+ __u32 gdi_device:1;
+ __u32 reserved:28;
+};
+
+struct d3dkmt_createdevice {
+ struct d3dkmthandle adapter;
+ __u32 reserved3;
+ struct d3dkmt_createdeviceflags flags;
+ struct d3dkmthandle device;
+#ifdef __KERNEL__
+ void *command_buffer;
+#else
+ __u64 command_buffer;
+#endif
+ __u32 command_buffer_size;
+ __u32 reserved;
+#ifdef __KERNEL__
+ struct d3dddi_allocationlist *allocation_list;
+#else
+ __u64 allocation_list;
+#endif
+ __u32 allocation_list_size;
+ __u32 reserved1;
+#ifdef __KERNEL__
+ struct d3dddi_patchlocationlist *patch_location_list;
+#else
+ __u64 patch_location_list;
+#endif
+ __u32 patch_location_list_size;
+ __u32 reserved2;
+};
+
+struct d3dkmt_destroydevice {
+ struct d3dkmthandle device;
+};
+
+enum d3dkmt_clienthint {
+ _D3DKMT_CLIENTHNT_UNKNOWN = 0,
+ _D3DKMT_CLIENTHINT_OPENGL = 1,
+ _D3DKMT_CLIENTHINT_CDD = 2,
+ _D3DKMT_CLIENTHINT_DX7 = 7,
+ _D3DKMT_CLIENTHINT_DX8 = 8,
+ _D3DKMT_CLIENTHINT_DX9 = 9,
+ _D3DKMT_CLIENTHINT_DX10 = 10,
+};
+
+struct d3dddi_createcontextflags {
+ union {
+ struct {
+ __u32 null_rendering:1;
+ __u32 initial_data:1;
+ __u32 disable_gpu_timeout:1;
+ __u32 synchronization_only:1;
+ __u32 hw_queue_supported:1;
+ __u32 reserved:27;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_createcontext {
+ struct d3dkmthandle device;
+ __u32 node_ordinal;
+ __u32 engine_affinity;
+ struct d3dddi_createcontextflags flags;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+ enum d3dkmt_clienthint client_hint;
+ struct d3dkmthandle context;
+#ifdef __KERNEL__
+ void *command_buffer;
+#else
+ __u64 command_buffer;
+#endif
+ __u32 command_buffer_size;
+#ifdef __KERNEL__
+ struct d3dddi_allocationlist *allocation_list;
+#else
+ __u64 allocation_list;
+#endif
+ __u32 allocation_list_size;
+#ifdef __KERNEL__
+ struct d3dddi_patchlocationlist *patch_location_list;
+#else
+ __u64 patch_location_list;
+#endif
+ __u32 patch_location_list_size;
+ __u64 obsolete;
+};
+
+struct d3dkmt_destroycontext {
+ struct d3dkmthandle context;
+};
+
+struct d3dkmt_createcontextvirtual {
+ struct d3dkmthandle device;
+ __u32 node_ordinal;
+ __u32 engine_affinity;
+ struct d3dddi_createcontextflags flags;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+ enum d3dkmt_clienthint client_hint;
+ struct d3dkmthandle context;
+};
+
+struct d3dddi_createhwcontextflags {
+ union {
+ struct {
+ __u32 reserved:32;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dddi_createhwqueueflags {
+ union {
+ struct {
+ __u32 disable_gpu_timeout:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ };
+};
+
+enum d3dddi_pagingqueue_priority {
+ _D3DDDI_PAGINGQUEUE_PRIORITY_BELOW_NORMAL = -1,
+ _D3DDDI_PAGINGQUEUE_PRIORITY_NORMAL = 0,
+ _D3DDDI_PAGINGQUEUE_PRIORITY_ABOVE_NORMAL = 1,
+};
+
+struct d3dkmt_createpagingqueue {
+ struct d3dkmthandle device;
+ enum d3dddi_pagingqueue_priority priority;
+ struct d3dkmthandle paging_queue;
+ struct d3dkmthandle sync_object;
+#ifdef __KERNEL__
+ void *fence_cpu_virtual_address;
+#else
+ __u64 fence_cpu_virtual_address;
+#endif
+ __u32 physical_adapter_index;
+};
+
+struct d3dddi_destroypagingqueue {
+ struct d3dkmthandle paging_queue;
+};
+
+struct d3dkmt_renderflags {
+ __u32 resize_command_buffer:1;
+ __u32 resize_allocation_list:1;
+ __u32 resize_patch_location_list:1;
+ __u32 null_rendering:1;
+ __u32 present_redirected:1;
+ __u32 render_km:1;
+ __u32 render_km_readback:1;
+ __u32 reserved:25;
+};
+
+struct d3dkmt_render {
+ union {
+ struct d3dkmthandle device;
+ struct d3dkmthandle context;
+ };
+ __u32 command_offset;
+ __u32 command_length;
+ __u32 allocation_count;
+ __u32 patch_location_count;
+#ifdef __KERNEL__
+ void *new_command_buffer;
+#else
+ __u64 new_command_buffer;
+#endif
+ __u32 new_command_buffer_size;
+#ifdef __KERNEL__
+ struct d3dddi_allocationlist *new_allocation_list;
+#else
+ __u64 new_allocation_list;
+#endif
+ __u32 new_allocation_list_size;
+#ifdef __KERNEL__
+ struct d3dddi_patchlocationlist *new_patch_pocation_list;
+#else
+ __u64 new_patch_pocation_list;
+#endif
+ __u32 new_patch_pocation_list_size;
+ struct d3dkmt_renderflags flags;
+ __u64 present_history_token;
+ __u32 broadcast_context_count;
+ struct d3dkmthandle broadcast_context[D3DDDI_MAX_BROADCAST_CONTEXT];
+ __u32 queued_buffer_count;
+ __u64 obsolete;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+};
+
+enum d3dkmt_standardallocationtype {
+ _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1,
+ _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2,
+};
+
+struct d3dkmt_standardallocation_existingheap {
+ __u64 size;
+};
+
+struct d3dkmt_createstandardallocationflags {
+ union {
+ struct {
+ __u32 reserved:32;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_createstandardallocation {
+ enum d3dkmt_standardallocationtype type;
+ __u32 reserved;
+ struct d3dkmt_standardallocation_existingheap existing_heap_data;
+ struct d3dkmt_createstandardallocationflags flags;
+ __u32 reserved1;
+};
+
+struct d3dddi_allocationinfo {
+ struct d3dkmthandle allocation;
+#ifdef __KERNEL__
+ const void *sysmem;
+#else
+ __u64 sysmem;
+#endif
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+ __u32 vidpn_source_id;
+ union {
+ struct {
+ __u32 primary:1;
+ __u32 stereo:1;
+ __u32 override_priority:1;
+ __u32 reserved:29;
+ };
+ __u32 value;
+ } flags;
+};
+
+struct d3dkmt_createallocationflags {
+ union {
+ struct {
+ __u32 create_resource:1;
+ __u32 create_shared:1;
+ __u32 non_secure:1;
+ __u32 create_protected:1;
+ __u32 restrict_shared_access:1;
+ __u32 existing_sysmem:1;
+ __u32 nt_security_sharing:1;
+ __u32 read_only:1;
+ __u32 create_write_combined:1;
+ __u32 create_cached:1;
+ __u32 swap_chain_back_buffer:1;
+ __u32 cross_adapter:1;
+ __u32 open_cross_adapter:1;
+ __u32 partial_shared_creation:1;
+ __u32 zeroed:1;
+ __u32 write_watch:1;
+ __u32 standard_allocation:1;
+ __u32 existing_section:1;
+ __u32 reserved:14;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_createallocation {
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+ struct d3dkmthandle global_share;
+ __u32 reserved;
+#ifdef __KERNEL__
+ const void *private_runtime_data;
+#else
+ __u64 private_runtime_data;
+#endif
+ __u32 private_runtime_data_size;
+ __u32 reserved1;
+ union {
+#ifdef __KERNEL__
+ struct d3dkmt_createstandardallocation *standard_allocation;
+ const void *priv_drv_data;
+#else
+ __u64 standard_allocation;
+ __u64 priv_drv_data;
+#endif
+ };
+ __u32 priv_drv_data_size;
+ __u32 alloc_count;
+#ifdef __KERNEL__
+ struct d3dddi_allocationinfo *allocation_info;
+#else
+ __u64 allocation_info;
+#endif
+ struct d3dkmt_createallocationflags flags;
+ __u32 reserved2;
+ __u64 private_runtime_resource_handle;
+};
+
+struct d3dddicb_destroyallocation2flags {
+ union {
+ struct {
+ __u32 assume_not_in_use:1;
+ __u32 synchronous_destroy:1;
+ __u32 reserved:29;
+ __u32 system_use_only:1;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_destroyallocation2 {
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+#ifdef __KERNEL__
+ const struct d3dkmthandle *allocations;
+#else
+ __u64 allocations;
+#endif
+ __u32 alloc_count;
+ struct d3dddicb_destroyallocation2flags flags;
+};
+
+struct d3dddi_makeresident_flags {
+ union {
+ struct {
+ __u32 cant_trim_further:1;
+ __u32 must_succeed:1;
+ __u32 reserved:30;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dddi_makeresident {
+ struct d3dkmthandle paging_queue;
+ __u32 alloc_count;
+#ifdef __KERNEL__
+ const struct d3dkmthandle *allocation_list;
+ const __u32 *priority_list;
+#else
+ __u64 allocation_list;
+ __u64 priority_list;
+#endif
+ struct d3dddi_makeresident_flags flags;
+ __u64 paging_fence_value;
+ __u64 num_bytes_to_trim;
+};
+
+struct d3dddi_evict_flags {
+ union {
+ struct {
+ __u32 evict_only_if_necessary:1;
+ __u32 not_written_to:1;
+ __u32 reserved:30;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_evict {
+ struct d3dkmthandle device;
+ __u32 alloc_count;
+#ifdef __KERNEL__
+ const struct d3dkmthandle *allocations;
+#else
+ __u64 allocations;
+#endif
+ struct d3dddi_evict_flags flags;
+ __u32 reserved;
+ __u64 num_bytes_to_trim;
+};
+
+struct d3dddigpuva_protection_type {
+ union {
+ struct {
+ __u64 write:1;
+ __u64 execute:1;
+ __u64 zero:1;
+ __u64 no_access:1;
+ __u64 system_use_only:1;
+ __u64 reserved:59;
+ };
+ __u64 value;
+ };
+};
+
+enum d3dddi_updategpuvirtualaddress_operation_type {
+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP = 0,
+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_UNMAP = 1,
+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_COPY = 2,
+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP_PROTECT = 3,
+};
+
+struct d3dddi_updategpuvirtualaddress_operation {
+ enum d3dddi_updategpuvirtualaddress_operation_type operation;
+ union {
+ struct {
+ __u64 base_address;
+ __u64 size;
+ struct d3dkmthandle allocation;
+ __u64 allocation_offset;
+ __u64 allocation_size;
+ } map;
+ struct {
+ __u64 base_address;
+ __u64 size;
+ struct d3dkmthandle allocation;
+ __u64 allocation_offset;
+ __u64 allocation_size;
+ struct d3dddigpuva_protection_type protection;
+ __u64 driver_protection;
+ } map_protect;
+ struct {
+ __u64 base_address;
+ __u64 size;
+ struct d3dddigpuva_protection_type protection;
+ } unmap;
+ struct {
+ __u64 source_address;
+ __u64 size;
+ __u64 dest_address;
+ } copy;
+ };
+};
+
+enum d3dddigpuva_reservation_type {
+ _D3DDDIGPUVA_RESERVE_NO_ACCESS = 0,
+ _D3DDDIGPUVA_RESERVE_ZERO = 1,
+ _D3DDDIGPUVA_RESERVE_NO_COMMIT = 2
+};
+
+struct d3dkmt_updategpuvirtualaddress {
+ struct d3dkmthandle device;
+ struct d3dkmthandle context;
+ struct d3dkmthandle fence_object;
+ __u32 num_operations;
+#ifdef __KERNEL__
+ struct d3dddi_updategpuvirtualaddress_operation *operations;
+#else
+ __u64 operations;
+#endif
+ __u32 reserved0;
+ __u32 reserved1;
+ __u64 reserved2;
+ __u64 fence_value;
+ union {
+ struct {
+ __u32 do_not_wait:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ } flags;
+ __u32 reserved3;
+};
+
+struct d3dddi_mapgpuvirtualaddress {
+ struct d3dkmthandle paging_queue;
+ __u64 base_address;
+ __u64 minimum_address;
+ __u64 maximum_address;
+ struct d3dkmthandle allocation;
+ __u64 offset_in_pages;
+ __u64 size_in_pages;
+ struct d3dddigpuva_protection_type protection;
+ __u64 driver_protection;
+ __u32 reserved0;
+ __u64 reserved1;
+ __u64 virtual_address;
+ __u64 paging_fence_value;
+};
+
+struct d3dddi_reservegpuvirtualaddress {
+ struct d3dkmthandle adapter;
+ __u64 base_address;
+ __u64 minimum_address;
+ __u64 maximum_address;
+ __u64 size;
+ enum d3dddigpuva_reservation_type reservation_type;
+ __u64 driver_protection;
+ __u64 virtual_address;
+ __u64 paging_fence_value;
+};
+
+struct d3dkmt_freegpuvirtualaddress {
+ struct d3dkmthandle adapter;
+ __u32 reserved;
+ __u64 base_address;
+ __u64 size;
+};
+
+enum d3dkmt_memory_segment_group {
+ _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0,
+ _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1
+};
+
+struct d3dkmt_queryvideomemoryinfo {
+ __u64 process;
+ struct d3dkmthandle adapter;
+ enum d3dkmt_memory_segment_group memory_segment_group;
+ __u64 budget;
+ __u64 current_usage;
+ __u64 current_reservation;
+ __u64 available_for_reservation;
+ __u32 physical_adapter_index;
+};
+
+struct d3dkmt_adaptertype {
+ union {
+ struct {
+ __u32 render_supported:1;
+ __u32 display_supported:1;
+ __u32 software_device:1;
+ __u32 post_device:1;
+ __u32 hybrid_discrete:1;
+ __u32 hybrid_integrated:1;
+ __u32 indirect_display_device:1;
+ __u32 paravirtualized:1;
+ __u32 acg_supported:1;
+ __u32 support_set_timings_from_vidpn:1;
+ __u32 detachable:1;
+ __u32 compute_only:1;
+ __u32 prototype:1;
+ __u32 reserved:19;
+ };
+ __u32 value;
+ };
+};
+
+enum kmtqueryadapterinfotype {
+ _KMTQAITYPE_UMDRIVERPRIVATE = 0,
+ _KMTQAITYPE_ADAPTERTYPE = 15,
+ _KMTQAITYPE_ADAPTERTYPE_RENDER = 57
+};
+
+struct d3dkmt_queryadapterinfo {
+ struct d3dkmthandle adapter;
+ enum kmtqueryadapterinfotype type;
+#ifdef __KERNEL__
+ void *private_data;
+#else
+ __u64 private_data;
+#endif
+ __u32 private_data_size;
+};
+
+enum d3dkmt_escapetype {
+ _D3DKMT_ESCAPE_DRIVERPRIVATE = 0,
+ _D3DKMT_ESCAPE_VIDMM = 1,
+ _D3DKMT_ESCAPE_VIDSCH = 3,
+ _D3DKMT_ESCAPE_DEVICE = 4,
+ _D3DKMT_ESCAPE_DRT_TEST = 8,
+};
+
+enum d3dkmt_drt_test_command {
+ _D3DKMT_DRT_TEST_COMMAND_HANDLETABLE = 39,
+};
+
+struct d3dkmt_drt_escape_head {
+ __u32 signature;
+ __u32 buffer_size;
+ enum d3dkmt_drt_test_command command;
+};
+
+enum d3dkmt_ht_command {
+ _D3DKMT_HT_COMMAND_ALLOC,
+ _D3DKMT_HT_COMMAND_FREE,
+ _D3DKMT_HT_COMMAND_ASSIGN,
+ _D3DKMT_HT_COMMAND_GET,
+ _D3DKMT_HT_COMMAND_DESTROY,
+};
+
+struct d3dkmt_ht_desc {
+ struct d3dkmt_drt_escape_head head;
+ enum d3dkmt_ht_command command;
+ __u32 index;
+ struct d3dkmthandle handle;
+ __u32 object_type;
+#ifdef __KERNEL__
+ void *object;
+#else
+ __u64 object;
+#endif
+};
+
+struct d3dddi_escapeflags {
+ union {
+ struct {
+ __u32 hardware_access:1;
+ __u32 device_status_query:1;
+ __u32 change_frame_latency:1;
+ __u32 no_adapter_synchronization:1;
+ __u32 reserved:1;
+ __u32 virtual_machine_data:1;
+ __u32 driver_known_escape:1;
+ __u32 driver_common_escape:1;
+ __u32 reserved2:24;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_escape {
+ struct d3dkmthandle adapter;
+ struct d3dkmthandle device;
+ enum d3dkmt_escapetype type;
+ struct d3dddi_escapeflags flags;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+ struct d3dkmthandle context;
+};
+
+enum dxgk_render_pipeline_stage {
+ _DXGK_RENDER_PIPELINE_STAGE_UNKNOWN = 0,
+ _DXGK_RENDER_PIPELINE_STAGE_INPUT_ASSEMBLER = 1,
+ _DXGK_RENDER_PIPELINE_STAGE_VERTEX_SHADER = 2,
+ _DXGK_RENDER_PIPELINE_STAGE_GEOMETRY_SHADER = 3,
+ _DXGK_RENDER_PIPELINE_STAGE_STREAM_OUTPUT = 4,
+ _DXGK_RENDER_PIPELINE_STAGE_RASTERIZER = 5,
+ _DXGK_RENDER_PIPELINE_STAGE_PIXEL_SHADER = 6,
+ _DXGK_RENDER_PIPELINE_STAGE_OUTPUT_MERGER = 7,
+};
+
+enum dxgk_page_fault_flags {
+ _DXGK_PAGE_FAULT_WRITE = 0x1,
+ _DXGK_PAGE_FAULT_FENCE_INVALID = 0x2,
+ _DXGK_PAGE_FAULT_ADAPTER_RESET_REQUIRED = 0x4,
+ _DXGK_PAGE_FAULT_ENGINE_RESET_REQUIRED = 0x8,
+ _DXGK_PAGE_FAULT_FATAL_HARDWARE_ERROR = 0x10,
+ _DXGK_PAGE_FAULT_IOMMU = 0x20,
+ _DXGK_PAGE_FAULT_HW_CONTEXT_VALID = 0x40,
+ _DXGK_PAGE_FAULT_PROCESS_HANDLE_VALID = 0x80,
+};
+
+enum dxgk_general_error_code {
+ _DXGK_GENERAL_ERROR_PAGE_FAULT = 0,
+ _DXGK_GENERAL_ERROR_INVALID_INSTRUCTION = 1,
+};
+
+struct dxgk_fault_error_code {
+ union {
+ struct {
+ __u32 is_device_specific_code:1;
+ enum dxgk_general_error_code general_error_code:31;
+ };
+ struct {
+ __u32 is_device_specific_code_reserved_bit:1;
+ __u32 device_specific_code:31;
+ };
+ };
+};
+
+enum d3dkmt_deviceexecution_state {
+ _D3DKMT_DEVICEEXECUTION_ACTIVE = 1,
+ _D3DKMT_DEVICEEXECUTION_RESET = 2,
+ _D3DKMT_DEVICEEXECUTION_HUNG = 3,
+ _D3DKMT_DEVICEEXECUTION_STOPPED = 4,
+ _D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5,
+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6,
+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7,
+};
+
+struct d3dkmt_devicereset_state {
+ union {
+ struct {
+ __u32 desktop_switched:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_present_stats {
+ __u32 present_count;
+ __u32 present_refresh_count;
+ __u32 sync_refresh_count;
+ __u32 reserved;
+ __u64 sync_qpc_time;
+ __u64 sync_gpu_time;
+};
+
+struct d3dkmt_devicepresent_state {
+ __u32 vidpn_source_id;
+ __u32 reserved;
+ struct d3dkmt_present_stats present_stats;
+};
+
+struct d3dkmt_present_stats_dwm {
+ __u32 present_count;
+ __u32 present_refresh_count;
+ __u64 present_qpc_time;
+ __u32 sync_refresh_count;
+ __u32 reserved;
+ __u64 sync_qpc_time;
+ __u32 custom_present_duration;
+ __u32 reserved1;
+};
+
+struct d3dkmt_devicepagefault_state {
+ __u64 faulted_primitive_api_sequence_number;
+ enum dxgk_render_pipeline_stage faulted_pipeline_stage;
+ __u32 faulted_bind_table_entry;
+ enum dxgk_page_fault_flags page_fault_flags;
+ struct dxgk_fault_error_code fault_error_code;
+ __u64 faulted_virtual_address;
+};
+
+struct d3dkmt_devicepresent_state_dwm {
+ __u32 vidpn_source_id;
+ __u32 reserved;
+ struct d3dkmt_present_stats_dwm present_stats;
+};
+
+struct d3dkmt_devicepresent_queue_state {
+ __u32 vidpn_source_id;
+ bool bQueuedPresentLimitReached;
+};
+
+enum d3dkmt_devicestate_type {
+ _D3DKMT_DEVICESTATE_EXECUTION = 1,
+ _D3DKMT_DEVICESTATE_PRESENT = 2,
+ _D3DKMT_DEVICESTATE_RESET = 3,
+ _D3DKMT_DEVICESTATE_PRESENT_DWM = 4,
+ _D3DKMT_DEVICESTATE_PAGE_FAULT = 5,
+ _D3DKMT_DEVICESTATE_PRESENT_QUEUE = 6,
+};
+
+struct d3dkmt_getdevicestate {
+ struct d3dkmthandle device;
+ enum d3dkmt_devicestate_type state_type;
+ union {
+ enum d3dkmt_deviceexecution_state execution_state;
+ struct d3dkmt_devicepresent_state present_state;
+ struct d3dkmt_devicereset_state reset_state;
+ struct d3dkmt_devicepresent_state_dwm present_state_dwm;
+ struct d3dkmt_devicepagefault_state page_fault_state;
+ struct d3dkmt_devicepresent_queue_state present_queue_state;
+ };
+};
+
+enum d3dkmdt_gdisurfacetype {
+ _D3DKMDT_GDISURFACE_INVALID = 0,
+ _D3DKMDT_GDISURFACE_TEXTURE = 1,
+ _D3DKMDT_GDISURFACE_STAGING_CPUVISIBLE = 2,
+ _D3DKMDT_GDISURFACE_STAGING = 3,
+ _D3DKMDT_GDISURFACE_LOOKUPTABLE = 4,
+ _D3DKMDT_GDISURFACE_EXISTINGSYSMEM = 5,
+ _D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE = 6,
+ _D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER = 7,
+ _D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE_CROSSADAPTER = 8,
+};
+
+struct d3dddi_rational {
+ __u32 numerator;
+ __u32 denominator;
+};
+
+enum d3dddiformat {
+ _D3DDDIFMT_UNKNOWN = 0,
+};
+
+struct d3dkmdt_gdisurfacedata {
+ __u32 width;
+ __u32 height;
+ __u32 format;
+ enum d3dkmdt_gdisurfacetype type;
+ __u32 flags;
+ __u32 pitch;
+};
+
+struct d3dkmdt_stagingsurfacedata {
+ __u32 width;
+ __u32 height;
+ __u32 pitch;
+};
+
+struct d3dkmdt_sharedprimarysurfacedata {
+ __u32 width;
+ __u32 height;
+ enum d3dddiformat format;
+ struct d3dddi_rational refresh_rate;
+ __u32 vidpn_source_id;
+};
+
+struct d3dkmdt_shadowsurfacedata {
+ __u32 width;
+ __u32 height;
+ enum d3dddiformat format;
+ __u32 pitch;
+};
+
+enum d3dkmdt_standardallocationtype {
+ _D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE = 1,
+ _D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE = 2,
+ _D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE = 3,
+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE = 4,
+};
+
+struct d3dddi_synchronizationobject_flags {
+ union {
+ struct {
+ __u32 shared:1;
+ __u32 nt_security_sharing:1;
+ __u32 cross_adapter:1;
+ __u32 top_of_pipeline:1;
+ __u32 no_signal:1;
+ __u32 no_wait:1;
+ __u32 no_signal_max_value_on_tdr:1;
+ __u32 no_gpu_access:1;
+ __u32 reserved:23;
+ };
+ __u32 value;
+ };
+};
+
+enum d3dddi_synchronizationobject_type {
+ _D3DDDI_SYNCHRONIZATION_MUTEX = 1,
+ _D3DDDI_SEMAPHORE = 2,
+ _D3DDDI_FENCE = 3,
+ _D3DDDI_CPU_NOTIFICATION = 4,
+ _D3DDDI_MONITORED_FENCE = 5,
+ _D3DDDI_PERIODIC_MONITORED_FENCE = 6,
+ _D3DDDI_SYNCHRONIZATION_TYPE_LIMIT
+};
+
+struct d3dddi_synchronizationobjectinfo2 {
+ enum d3dddi_synchronizationobject_type type;
+ struct d3dddi_synchronizationobject_flags flags;
+ union {
+ struct {
+ __u32 initial_state;
+ } synchronization_mutex;
+
+ struct {
+ __u32 max_count;
+ __u32 initial_count;
+ } semaphore;
+
+ struct {
+ __u64 fence_value;
+ } fence;
+
+ struct {
+ __u64 event;
+ } cpu_notification;
+
+ struct {
+ __u64 initial_fence_value;
+#ifdef __KERNEL__
+ void *fence_cpu_virtual_address;
+#else
+ __u64 *fence_cpu_virtual_address;
+#endif
+ __u64 fence_gpu_virtual_address;
+ __u32 engine_affinity;
+ } monitored_fence;
+
+ struct {
+ struct d3dkmthandle adapter;
+ __u32 vidpn_target_id;
+ __u64 time;
+#ifdef __KERNEL__
+ void *fence_cpu_virtual_address;
+#else
+ __u64 fence_cpu_virtual_address;
+#endif
+ __u64 fence_gpu_virtual_address;
+ __u32 engine_affinity;
+ } periodic_monitored_fence;
+
+ struct {
+ __u64 reserved[8];
+ } reserved;
+ };
+ struct d3dkmthandle shared_handle;
+};
+
+struct d3dkmt_createsynchronizationobject2 {
+ struct d3dkmthandle device;
+ __u32 reserved;
+ struct d3dddi_synchronizationobjectinfo2 info;
+ struct d3dkmthandle sync_object;
+ __u32 reserved1;
+};
+
+struct d3dkmt_waitforsynchronizationobject2 {
+ struct d3dkmthandle context;
+ __u32 object_count;
+ struct d3dkmthandle object_array[D3DDDI_MAX_OBJECT_WAITED_ON];
+ union {
+ struct {
+ __u64 fence_value;
+ } fence;
+ __u64 reserved[8];
+ };
+};
+
+struct d3dddicb_signalflags {
+ union {
+ struct {
+ __u32 signal_at_submission:1;
+ __u32 enqueue_cpu_event:1;
+ __u32 allow_fence_rewind:1;
+ __u32 reserved:28;
+ __u32 DXGK_SIGNAL_FLAG_INTERNAL0:1;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_signalsynchronizationobject2 {
+ struct d3dkmthandle context;
+ __u32 object_count;
+ struct d3dkmthandle object_array[D3DDDI_MAX_OBJECT_SIGNALED];
+ struct d3dddicb_signalflags flags;
+ __u32 context_count;
+ struct d3dkmthandle contexts[D3DDDI_MAX_BROADCAST_CONTEXT];
+ union {
+ struct {
+ __u64 fence_value;
+ } fence;
+ __u64 cpu_event_handle;
+ __u64 reserved[8];
+ };
+};
+
+struct d3dddi_waitforsynchronizationobjectfromcpu_flags {
+ union {
+ struct {
+ __u32 wait_any:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_waitforsynchronizationobjectfromcpu {
+ struct d3dkmthandle device;
+ __u32 object_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+ __u64 *fence_values;
+#else
+ __u64 objects;
+ __u64 fence_values;
+#endif
+ __u64 async_event;
+ struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags;
+};
+
+struct d3dkmt_signalsynchronizationobjectfromcpu {
+ struct d3dkmthandle device;
+ __u32 object_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+ __u64 *fence_values;
+#else
+ __u64 objects;
+ __u64 fence_values;
+#endif
+ struct d3dddicb_signalflags flags;
+};
+
+struct d3dkmt_waitforsynchronizationobjectfromgpu {
+ struct d3dkmthandle context;
+ __u32 object_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+#else
+ __u64 objects;
+#endif
+ union {
+#ifdef __KERNEL__
+ __u64 *monitored_fence_values;
+#else
+ __u64 monitored_fence_values;
+#endif
+ __u64 fence_value;
+ __u64 reserved[8];
+ };
+};
+
+struct d3dkmt_signalsynchronizationobjectfromgpu {
+ struct d3dkmthandle context;
+ __u32 object_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+#else
+ __u64 objects;
+#endif
+ union {
+#ifdef __KERNEL__
+ __u64 *monitored_fence_values;
+#else
+ __u64 monitored_fence_values;
+#endif
+ __u64 reserved[8];
+ };
+};
+
+struct d3dkmt_signalsynchronizationobjectfromgpu2 {
+ __u32 object_count;
+ __u32 reserved1;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+#else
+ __u64 objects;
+#endif
+ struct d3dddicb_signalflags flags;
+ __u32 context_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *contexts;
+#else
+ __u64 contexts;
+#endif
+ union {
+ __u64 fence_value;
+ __u64 cpu_event_handle;
+#ifdef __KERNEL__
+ __u64 *monitored_fence_values;
+#else
+ __u64 monitored_fence_values;
+#endif
+ __u64 reserved[8];
+ };
+};
+
+struct d3dkmt_destroysynchronizationobject {
+ struct d3dkmthandle sync_object;
+};
+
+struct d3dkmt_opensynchronizationobject {
+ struct d3dkmthandle shared_handle;
+ struct d3dkmthandle sync_object;
+ __u64 reserved[8];
+};
+
+struct d3dkmt_submitcommandflags {
+ __u32 null_rendering:1;
+ __u32 present_redirected:1;
+ __u32 reserved:30;
+};
+
+struct d3dkmt_submitcommand {
+ __u64 command_buffer;
+ __u32 command_length;
+ struct d3dkmt_submitcommandflags flags;
+ __u64 present_history_token;
+ __u32 broadcast_context_count;
+ struct d3dkmthandle broadcast_context[D3DDDI_MAX_BROADCAST_CONTEXT];
+ __u32 reserved;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+ __u32 num_primaries;
+ struct d3dkmthandle written_primaries[D3DDDI_MAX_WRITTEN_PRIMARIES];
+ __u32 num_history_buffers;
+ __u32 reserved1;
+#ifdef __KERNEL__
+ struct d3dkmthandle *history_buffer_array;
+#else
+ __u64 history_buffer_array;
+#endif
+};
+
+struct d3dkmt_submitcommandtohwqueue {
+ struct d3dkmthandle hwqueue;
+ __u32 reserved;
+ __u64 hwqueue_progress_fence_id;
+ __u64 command_buffer;
+ __u32 command_length;
+ __u32 priv_drv_data_size;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 num_primaries;
+ __u32 reserved1;
+#ifdef __KERNEL__
+ struct d3dkmthandle *written_primaries;
+#else
+ __u64 written_primaries;
+#endif
+};
+
+struct d3dkmt_setcontextschedulingpriority {
+ struct d3dkmthandle context;
+ int priority;
+};
+
+struct d3dkmt_setcontextinprocessschedulingpriority {
+ struct d3dkmthandle context;
+ int priority;
+};
+
+struct d3dkmt_getcontextschedulingpriority {
+ struct d3dkmthandle context;
+ int priority;
+};
+
+struct d3dkmt_getcontextinprocessschedulingpriority {
+ struct d3dkmthandle context;
+ int priority;
+};
+
+struct d3dkmt_setallocationpriority {
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+#ifdef __KERNEL__
+ const struct d3dkmthandle *allocation_list;
+#else
+ __u64 allocation_list;
+#endif
+ __u32 allocation_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ const __u32 *priorities;
+#else
+ __u64 priorities;
+#endif
+};
+
+struct d3dkmt_getallocationpriority {
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+#ifdef __KERNEL__
+ const struct d3dkmthandle *allocation_list;
+#else
+ __u64 allocation_list;
+#endif
+ __u32 allocation_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ __u32 *priorities;
+#else
+ __u64 priorities;
+#endif
+};
+
+enum d3dkmt_allocationresidencystatus {
+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINGPUMEMORY = 1,
+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINSHAREDMEMORY = 2,
+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_NOTRESIDENT = 3,
+};
+
+struct d3dkmt_queryallocationresidency {
+ struct d3dkmthandle device;
+ struct d3dkmthandle resource;
+#ifdef __KERNEL__
+ struct d3dkmthandle *allocations;
+#else
+ __u64 allocations;
+#endif
+ __u32 allocation_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ enum d3dkmt_allocationresidencystatus *residency_status;
+#else
+ __u64 residency_status;
+#endif
+};
+
+struct d3dddicb_lock2flags {
+ union {
+ struct {
+ __u32 reserved:32;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_lock2 {
+ struct d3dkmthandle device;
+ struct d3dkmthandle allocation;
+ struct d3dddicb_lock2flags flags;
+ __u32 reserved;
+#ifdef __KERNEL__
+ void *data;
+#else
+ __u64 data;
+#endif
+};
+
+struct d3dkmt_unlock2 {
+ struct d3dkmthandle device;
+ struct d3dkmthandle allocation;
+};
+
+enum d3dkmt_device_error_reason {
+ _D3DKMT_DEVICE_ERROR_REASON_GENERIC = 0x80000000,
+ _D3DKMT_DEVICE_ERROR_REASON_DRIVER_ERROR = 0x80000006,
+};
+
+struct d3dkmt_markdeviceaserror {
+ struct d3dkmthandle device;
+ enum d3dkmt_device_error_reason reason;
+};
+
+struct d3dddi_updateallocproperty_flags {
+ union {
+ struct {
+ __u32 accessed_physically:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dddi_segmentpreference {
+ union {
+ struct {
+ __u32 segment_id0:5;
+ __u32 direction0:1;
+ __u32 segment_id1:5;
+ __u32 direction1:1;
+ __u32 segment_id2:5;
+ __u32 direction2:1;
+ __u32 segment_id3:5;
+ __u32 direction3:1;
+ __u32 segment_id4:5;
+ __u32 direction4:1;
+ __u32 reserved:2;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dddi_updateallocproperty {
+ struct d3dkmthandle paging_queue;
+ struct d3dkmthandle allocation;
+ __u32 supported_segment_set;
+ struct d3dddi_segmentpreference preferred_segment;
+ struct d3dddi_updateallocproperty_flags flags;
+ __u64 paging_fence_value;
+ union {
+ struct {
+ __u32 set_accessed_physically:1;
+ __u32 set_supported_segmentSet:1;
+ __u32 set_preferred_segment:1;
+ __u32 reserved:29;
+ };
+ __u32 property_mask_value;
+ };
+};
+
+enum d3dkmt_offer_priority {
+ _D3DKMT_OFFER_PRIORITY_LOW = 1,
+ _D3DKMT_OFFER_PRIORITY_NORMAL = 2,
+ _D3DKMT_OFFER_PRIORITY_HIGH = 3,
+ _D3DKMT_OFFER_PRIORITY_AUTO = 4,
+};
+
+struct d3dkmt_offer_flags {
+ union {
+ struct {
+ __u32 offer_immediately:1;
+ __u32 allow_decommit:1;
+ __u32 reserved:30;
+ };
+ __u32 value;
+ };
+};
+
+struct d3dkmt_offerallocations {
+ struct d3dkmthandle device;
+ __u32 reserved;
+#ifdef __KERNEL__
+ struct d3dkmthandle *resources;
+ const struct d3dkmthandle *allocations;
+#else
+ __u64 resources;
+ __u64 allocations;
+#endif
+ __u32 allocation_count;
+ enum d3dkmt_offer_priority priority;
+ struct d3dkmt_offer_flags flags;
+ __u32 reserved1;
+};
+
+enum d3dddi_reclaim_result {
+ _D3DDDI_RECLAIM_RESULT_OK = 0,
+ _D3DDDI_RECLAIM_RESULT_DISCARDED = 1,
+ _D3DDDI_RECLAIM_RESULT_NOT_COMMITTED = 2,
+};
+
+struct d3dkmt_reclaimallocations2 {
+ struct d3dkmthandle paging_queue;
+ __u32 allocation_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *resources;
+ struct d3dkmthandle *allocations;
+#else
+ __u64 resources;
+ __u64 allocations;
+#endif
+ union {
+#ifdef __KERNEL__
+ __u32 *discarded;
+ enum d3dddi_reclaim_result *results;
+#else
+ __u64 discarded;
+ __u64 results;
+#endif
+ };
+ __u64 paging_fence_value;
+};
+
+struct d3dkmt_changevideomemoryreservation {
+ __u64 process;
+ struct d3dkmthandle adapter;
+ enum d3dkmt_memory_segment_group memory_segment_group;
+ __u64 reservation;
+ __u32 physical_adapter_index;
+};
+
+struct d3dkmt_createhwcontext {
+ struct d3dkmthandle device;
+ __u32 node_ordinal;
+ __u32 engine_affinity;
+ struct d3dddi_createhwcontextflags flags;
+ __u32 priv_drv_data_size;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ struct d3dkmthandle context;
+};
+
+struct d3dkmt_destroyhwcontext {
+ struct d3dkmthandle context;
+};
+
+struct d3dkmt_createhwqueue {
+ struct d3dkmthandle context;
+ struct d3dddi_createhwqueueflags flags;
+ __u32 priv_drv_data_size;
+ __u32 reserved;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ struct d3dkmthandle queue;
+ struct d3dkmthandle queue_progress_fence;
+#ifdef __KERNEL__
+ void *queue_progress_fence_cpu_va;
+#else
+ __u64 queue_progress_fence_cpu_va;
+#endif
+ __u64 queue_progress_fence_gpu_va;
+};
+
+struct d3dkmt_destroyhwqueue {
+ struct d3dkmthandle queue;
+};
+
+struct d3dkmt_submitwaitforsyncobjectstohwqueue {
+ struct d3dkmthandle hwqueue;
+ __u32 object_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+ __u64 *fence_values;
+#else
+ __u64 objects;
+ __u64 fence_values;
+#endif
+};
+
+struct d3dkmt_submitsignalsyncobjectstohwqueue {
+ struct d3dddicb_signalflags flags;
+ __u32 hwqueue_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *hwqueues;
+#else
+ __u64 hwqueues;
+#endif
+ __u32 object_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+ __u64 *fence_values;
+#else
+ __u64 objects;
+ __u64 fence_values;
+#endif
+};
+
+#pragma pack(push, 1)
+
+struct dxgk_gpuclockdata_flags {
+ union {
+ struct {
+ __u32 context_management_processor:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ };
+};
+
+struct dxgk_gpuclockdata {
+ __u64 gpu_frequency;
+ __u64 gpu_clock_counter;
+ __u64 cpu_clock_counter;
+ struct dxgk_gpuclockdata_flags flags;
+} __packed;
+
+struct d3dkmt_queryclockcalibration {
+ struct d3dkmthandle adapter;
+ __u32 node_ordinal;
+ __u32 physical_adapter_index;
+ struct dxgk_gpuclockdata clock_data;
+};
+
+#pragma pack(pop)
+
+struct d3dkmt_flushheaptransitions {
+ struct d3dkmthandle adapter;
+};
+
+struct d3dkmt_getsharedresourceadapterluid {
+ struct d3dkmthandle global_share;
+ __u64 handle;
+ struct winluid adapter_luid;
+};
+
+struct d3dkmt_invalidatecache {
+ struct d3dkmthandle device;
+ struct d3dkmthandle allocation;
+ __u64 offset;
+ __u64 length;
+};
+
+struct d3dddi_openallocationinfo2 {
+ struct d3dkmthandle allocation;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ __u32 priv_drv_data_size;
+ __u64 gpu_va;
+ __u64 reserved[6];
+};
+
+struct d3dkmt_opensyncobjectfromnthandle {
+ __u64 nt_handle;
+ struct d3dkmthandle sync_object;
+};
+
+struct d3dkmt_opensyncobjectfromnthandle2 {
+ __u64 nt_handle;
+ struct d3dkmthandle device;
+ struct d3dddi_synchronizationobject_flags flags;
+ struct d3dkmthandle sync_object;
+ __u32 reserved1;
+ union {
+ struct {
+#ifdef __KERNEL__
+ void *fence_value_cpu_va;
+#else
+ __u64 fence_value_cpu_va;
+#endif
+ __u64 fence_value_gpu_va;
+ __u32 engine_affinity;
+ } monitored_fence;
+ __u64 reserved[8];
+ };
+};
+
+struct d3dkmt_openresource {
+ struct d3dkmthandle device;
+ struct d3dkmthandle global_share;
+ __u32 allocation_count;
+#ifdef __KERNEL__
+ struct d3dddi_openallocationinfo2 *open_alloc_info;
+ void *private_runtime_data;
+#else
+ __u64 open_alloc_info;
+ __u64 private_runtime_data;
+#endif
+ int private_runtime_data_size;
+#ifdef __KERNEL__
+ void *resource_priv_drv_data;
+#else
+ __u64 resource_priv_drv_data;
+#endif
+ __u32 resource_priv_drv_data_size;
+#ifdef __KERNEL__
+ void *total_priv_drv_data;
+#else
+ __u64 total_priv_drv_data;
+#endif
+ __u32 total_priv_drv_data_size;
+ struct d3dkmthandle resource;
+};
+
+struct d3dkmt_openresourcefromnthandle {
+ struct d3dkmthandle device;
+ __u32 reserved;
+ __u64 nt_handle;
+ __u32 allocation_count;
+ __u32 reserved1;
+#ifdef __KERNEL__
+ struct d3dddi_openallocationinfo2 *open_alloc_info;
+#else
+ __u64 open_alloc_info;
+#endif
+ int private_runtime_data_size;
+ __u32 reserved2;
+#ifdef __KERNEL__
+ void *private_runtime_data;
+#else
+ __u64 private_runtime_data;
+#endif
+ __u32 resource_priv_drv_data_size;
+ __u32 reserved3;
+#ifdef __KERNEL__
+ void *resource_priv_drv_data;
+#else
+ __u64 resource_priv_drv_data;
+#endif
+ __u32 total_priv_drv_data_size;
+#ifdef __KERNEL__
+ void *total_priv_drv_data;
+#else
+ __u64 total_priv_drv_data;
+#endif
+ struct d3dkmthandle resource;
+ struct d3dkmthandle keyed_mutex;
+#ifdef __KERNEL__
+ void *keyed_mutex_private_data;
+#else
+ __u64 keyed_mutex_private_data;
+#endif
+ __u32 keyed_mutex_private_data_size;
+ struct d3dkmthandle sync_object;
+};
+
+struct d3dkmt_queryresourceinfofromnthandle {
+ struct d3dkmthandle device;
+ __u32 reserved;
+ __u64 nt_handle;
+#ifdef __KERNEL__
+ void *private_runtime_data;
+#else
+ __u64 private_runtime_data;
+#endif
+ __u32 private_runtime_data_size;
+ __u32 total_priv_drv_data_size;
+ __u32 resource_priv_drv_data_size;
+ __u32 allocation_count;
+};
+
+struct d3dkmt_queryresourceinfo {
+ struct d3dkmthandle device;
+ struct d3dkmthandle global_share;
+#ifdef __KERNEL__
+ void *private_runtime_data;
+#else
+ __u64 private_runtime_data;
+#endif
+ __u32 private_runtime_data_size;
+ __u32 total_priv_drv_data_size;
+ __u32 resource_priv_drv_data_size;
+ __u32 allocation_count;
+};
+
+struct d3dkmt_shareobjects {
+ __u32 object_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ const struct d3dkmthandle *objects;
+ void *object_attr; /* security attributes */
+#else
+ __u64 objects;
+ __u64 object_attr;
+#endif
+ __u32 desired_access;
+ __u32 reserved1;
+#ifdef __KERNEL__
+ __u64 *shared_handle; /* output file descriptors */
+#else
+ __u64 shared_handle;
+#endif
+};
+
+union d3dkmt_enumadapters_filter {
+ struct {
+ __u64 include_compute_only:1;
+ __u64 include_display_only:1;
+ __u64 reserved:62;
+ };
+ __u64 value;
+};
+
+struct d3dkmt_enumadapters3 {
+ union d3dkmt_enumadapters_filter filter;
+ __u32 adapter_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ struct d3dkmt_adapterinfo *adapters;
+#else
+ __u64 adapters;
+#endif
+};
+
+enum d3dkmt_querystatistics_type {
+ _D3DKMT_QUERYSTATISTICS_ADAPTER = 0,
+ _D3DKMT_QUERYSTATISTICS_PROCESS = 1,
+ _D3DKMT_QUERYSTATISTICS_PROCESS_ADAPTER = 2,
+ _D3DKMT_QUERYSTATISTICS_SEGMENT = 3,
+ _D3DKMT_QUERYSTATISTICS_PROCESS_SEGMENT = 4,
+ _D3DKMT_QUERYSTATISTICS_NODE = 5,
+ _D3DKMT_QUERYSTATISTICS_PROCESS_NODE = 6,
+ _D3DKMT_QUERYSTATISTICS_VIDPNSOURCE = 7,
+ _D3DKMT_QUERYSTATISTICS_PROCESS_VIDPNSOURCE = 8,
+ _D3DKMT_QUERYSTATISTICS_PROCESS_SEGMENT_GROUP = 9,
+ _D3DKMT_QUERYSTATISTICS_PHYSICAL_ADAPTER = 10,
+};
+
+struct d3dkmt_querystatistics_result {
+ char size[0x308];
+};
+
+struct d3dkmt_querystatistics {
+ union {
+ struct {
+ enum d3dkmt_querystatistics_type type;
+ struct winluid adapter_luid;
+ __u64 process;
+ struct d3dkmt_querystatistics_result result;
+ };
+ char size[0x328];
+ };
+};
+
+struct d3dkmt_shareobjectwithhost {
+ struct d3dkmthandle device_handle;
+ struct d3dkmthandle object_handle;
+ __u64 reserved;
+ __u64 object_vail_nt_handle;
+};
+
+struct d3dkmt_createsyncfile {
+ struct d3dkmthandle device;
+ struct d3dkmthandle monitored_fence;
+ __u64 fence_value;
+ __u64 sync_file_handle; /* out */
+};
+
+struct d3dkmt_presentvirtual {
+ __s32 acquire_semaphore_fd;
+ __s32 release_semaphore_fd;
+ __s32 composition_memory_fd;
+ __u64 private_data_size;
+#ifdef __KERNEL__
+ void *private_data;
+#else
+ __u64 private_data;
+#endif
+};
+
+/*
+ * Dxgkrnl Graphics Port Driver ioctl definitions
+ */
+
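+/*
+ * All commands use ioctl type 0x47 with sequential command numbers
+ * (0x01 through LX_IO_MAX). Illustrative user-space usage, assuming the
+ * usual /dev/dxg device node (the path may differ per platform):
+ *
+ *	struct d3dkmt_enumadapters3 args = { 0 };
+ *	int fd = open("/dev/dxg", O_RDWR);
+ *	ioctl(fd, LX_DXENUMADAPTERS3, &args);
+ *
+ * With args.adapters left NULL, the driver returns the number of
+ * available adapters in args.adapter_count.
+ */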
+#define LX_DXOPENADAPTERFROMLUID \
+ _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid)
+#define LX_DXCREATEDEVICE \
+ _IOWR(0x47, 0x02, struct d3dkmt_createdevice)
+#define LX_DXCREATECONTEXT \
+ _IOWR(0x47, 0x03, struct d3dkmt_createcontext)
+#define LX_DXCREATECONTEXTVIRTUAL \
+ _IOWR(0x47, 0x04, struct d3dkmt_createcontextvirtual)
+#define LX_DXDESTROYCONTEXT \
+ _IOWR(0x47, 0x05, struct d3dkmt_destroycontext)
+#define LX_DXCREATEALLOCATION \
+ _IOWR(0x47, 0x06, struct d3dkmt_createallocation)
+#define LX_DXCREATEPAGINGQUEUE \
+ _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue)
+#define LX_DXRESERVEGPUVIRTUALADDRESS \
+ _IOWR(0x47, 0x08, struct d3dddi_reservegpuvirtualaddress)
+#define LX_DXQUERYADAPTERINFO \
+ _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo)
+#define LX_DXQUERYVIDEOMEMORYINFO \
+ _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo)
+#define LX_DXMAKERESIDENT \
+ _IOWR(0x47, 0x0b, struct d3dddi_makeresident)
+#define LX_DXMAPGPUVIRTUALADDRESS \
+ _IOWR(0x47, 0x0c, struct d3dddi_mapgpuvirtualaddress)
+#define LX_DXESCAPE \
+ _IOWR(0x47, 0x0d, struct d3dkmt_escape)
+#define LX_DXGETDEVICESTATE \
+ _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate)
+#define LX_DXSUBMITCOMMAND \
+ _IOWR(0x47, 0x0f, struct d3dkmt_submitcommand)
+#define LX_DXCREATESYNCHRONIZATIONOBJECT \
+ _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2)
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECT \
+ _IOWR(0x47, 0x11, struct d3dkmt_signalsynchronizationobject2)
+#define LX_DXWAITFORSYNCHRONIZATIONOBJECT \
+ _IOWR(0x47, 0x12, struct d3dkmt_waitforsynchronizationobject2)
+#define LX_DXDESTROYALLOCATION2 \
+ _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2)
+#define LX_DXENUMADAPTERS2 \
+ _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2)
+#define LX_DXCLOSEADAPTER \
+ _IOWR(0x47, 0x15, struct d3dkmt_closeadapter)
+#define LX_DXCHANGEVIDEOMEMORYRESERVATION \
+ _IOWR(0x47, 0x16, struct d3dkmt_changevideomemoryreservation)
+#define LX_DXCREATEHWCONTEXT \
+ _IOWR(0x47, 0x17, struct d3dkmt_createhwcontext)
+#define LX_DXCREATEHWQUEUE \
+ _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue)
+#define LX_DXDESTROYDEVICE \
+ _IOWR(0x47, 0x19, struct d3dkmt_destroydevice)
+#define LX_DXDESTROYHWCONTEXT \
+ _IOWR(0x47, 0x1a, struct d3dkmt_destroyhwcontext)
+#define LX_DXDESTROYHWQUEUE \
+ _IOWR(0x47, 0x1b, struct d3dkmt_destroyhwqueue)
+#define LX_DXDESTROYPAGINGQUEUE \
+ _IOWR(0x47, 0x1c, struct d3dddi_destroypagingqueue)
+#define LX_DXDESTROYSYNCHRONIZATIONOBJECT \
+ _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject)
+#define LX_DXEVICT \
+ _IOWR(0x47, 0x1e, struct d3dkmt_evict)
+#define LX_DXFLUSHHEAPTRANSITIONS \
+ _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions)
+#define LX_DXFREEGPUVIRTUALADDRESS \
+ _IOWR(0x47, 0x20, struct d3dkmt_freegpuvirtualaddress)
+#define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \
+ _IOWR(0x47, 0x21, struct d3dkmt_getcontextinprocessschedulingpriority)
+#define LX_DXGETCONTEXTSCHEDULINGPRIORITY \
+ _IOWR(0x47, 0x22, struct d3dkmt_getcontextschedulingpriority)
+#define LX_DXGETSHAREDRESOURCEADAPTERLUID \
+ _IOWR(0x47, 0x23, struct d3dkmt_getsharedresourceadapterluid)
+#define LX_DXINVALIDATECACHE \
+ _IOWR(0x47, 0x24, struct d3dkmt_invalidatecache)
+#define LX_DXLOCK2 \
+ _IOWR(0x47, 0x25, struct d3dkmt_lock2)
+#define LX_DXMARKDEVICEASERROR \
+ _IOWR(0x47, 0x26, struct d3dkmt_markdeviceaserror)
+#define LX_DXOFFERALLOCATIONS \
+ _IOWR(0x47, 0x27, struct d3dkmt_offerallocations)
+#define LX_DXOPENRESOURCE \
+ _IOWR(0x47, 0x28, struct d3dkmt_openresource)
+#define LX_DXOPENSYNCHRONIZATIONOBJECT \
+ _IOWR(0x47, 0x29, struct d3dkmt_opensynchronizationobject)
+#define LX_DXQUERYALLOCATIONRESIDENCY \
+ _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency)
+#define LX_DXQUERYRESOURCEINFO \
+ _IOWR(0x47, 0x2b, struct d3dkmt_queryresourceinfo)
+#define LX_DXRECLAIMALLOCATIONS2 \
+ _IOWR(0x47, 0x2c, struct d3dkmt_reclaimallocations2)
+#define LX_DXRENDER \
+ _IOWR(0x47, 0x2d, struct d3dkmt_render)
+#define LX_DXSETALLOCATIONPRIORITY \
+ _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority)
+#define LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY \
+ _IOWR(0x47, 0x2f, struct d3dkmt_setcontextinprocessschedulingpriority)
+#define LX_DXSETCONTEXTSCHEDULINGPRIORITY \
+ _IOWR(0x47, 0x30, struct d3dkmt_setcontextschedulingpriority)
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \
+ _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu)
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \
+ _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu)
+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \
+ _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2)
+#define LX_DXSUBMITCOMMANDTOHWQUEUE \
+ _IOWR(0x47, 0x34, struct d3dkmt_submitcommandtohwqueue)
+#define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \
+ _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue)
+#define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \
+ _IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue)
+#define LX_DXUNLOCK2 \
+ _IOWR(0x47, 0x37, struct d3dkmt_unlock2)
+#define LX_DXUPDATEALLOCPROPERTY \
+ _IOWR(0x47, 0x38, struct d3dddi_updateallocproperty)
+#define LX_DXUPDATEGPUVIRTUALADDRESS \
+ _IOWR(0x47, 0x39, struct d3dkmt_updategpuvirtualaddress)
+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \
+ _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu)
+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \
+ _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu)
+#define LX_DXGETALLOCATIONPRIORITY \
+ _IOWR(0x47, 0x3c, struct d3dkmt_getallocationpriority)
+#define LX_DXQUERYCLOCKCALIBRATION \
+ _IOWR(0x47, 0x3d, struct d3dkmt_queryclockcalibration)
+#define LX_DXENUMADAPTERS3 \
+ _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3)
+#define LX_DXSHAREOBJECTS \
+ _IOWR(0x47, 0x3f, struct d3dkmt_shareobjects)
+#define LX_DXOPENSYNCOBJECTFROMNTHANDLE2 \
+ _IOWR(0x47, 0x40, struct d3dkmt_opensyncobjectfromnthandle2)
+#define LX_DXQUERYRESOURCEINFOFROMNTHANDLE \
+ _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle)
+#define LX_DXOPENRESOURCEFROMNTHANDLE \
+ _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle)
+#define LX_DXQUERYSTATISTICS \
+ _IOWR(0x47, 0x43, struct d3dkmt_querystatistics)
+#define LX_DXSHAREOBJECTWITHHOST \
+ _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost)
+#define LX_DXCREATESYNCFILE \
+ _IOWR(0x47, 0x45, struct d3dkmt_createsyncfile)
+#define LX_DXPRESENTVIRTUAL \
+ _IOWR(0x47, 0x46, struct d3dkmt_presentvirtual)
+
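+/*
+ * Highest ioctl command number defined above; ioctl.c sizes its dispatch
+ * table as LX_IO_MAX + 1.
+ */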
+#define LX_IO_MAX 0x46
+
+#endif /* _D3DKMTHK_H */
diff --git a/virtio_dxgkrnl/ioctl.c b/virtio_dxgkrnl/ioctl.c
new file mode 100644
index 0000000..f604a46
--- /dev/null
+++ b/virtio_dxgkrnl/ioctl.c
@@ -0,0 +1,5619 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Ioctl implementation
+ *
+ */
+
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/mman.h>
+
+#include "dxgkrnl.h"
+#include "dxgvmbus.h"
+#include "dxgsyncfile.h"
+#include "virtio_dxgkrnl.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+#undef dev_fmt
+#define dev_fmt(fmt) "dxgk: " fmt
+
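+/*
+ * Ioctl dispatch descriptor: pairs an ioctl command code with its handler
+ * and argument size. The table below holds one entry per command number
+ * up to LX_IO_MAX.
+ */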
+struct ioctl_desc {
+ int (*ioctl_callback)(struct dxgprocess *p, void __user *arg);
+ u32 ioctl;
+ u32 arg_size;
+};
+static struct ioctl_desc ioctls[LX_IO_MAX + 1];
+
+static int dxgsyncobj_release(struct inode *inode, struct file *file)
+{
+ struct dxgsharedsyncobject *syncobj = file->private_data;
+
+ dev_dbg(dxgglobaldev, "%s: %p", __func__, syncobj);
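+	/*
+	 * Hold a temporary reference on the shared sync object while the
+	 * NT handle reference count is updated under fd_mutex. When the
+	 * last NT handle reference is dropped, the host-side shared object
+	 * is destroyed as well.
+	 */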
+ mutex_lock(&syncobj->fd_mutex);
+ kref_get(&syncobj->ssyncobj_kref);
+ syncobj->host_shared_handle_nt_reference--;
+ if (syncobj->host_shared_handle_nt_reference == 0) {
+ if (syncobj->host_shared_handle_nt.v) {
+ dxgvmb_send_destroy_nt_shared_object(
+ syncobj->host_shared_handle_nt);
+ dev_dbg(dxgglobaldev, "Syncobj host_handle_nt destroyed: %x",
+ syncobj->host_shared_handle_nt.v);
+ syncobj->host_shared_handle_nt.v = 0;
+ }
+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release);
+ }
+ mutex_unlock(&syncobj->fd_mutex);
+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release);
+ return 0;
+}
+
+static const struct file_operations dxg_syncobj_fops = {
+ .release = dxgsyncobj_release,
+};
+
+static int dxgsharedresource_release(struct inode *inode, struct file *file)
+{
+ struct dxgsharedresource *resource = file->private_data;
+
+ dev_dbg(dxgglobaldev, "%s: %p", __func__, resource);
+ mutex_lock(&resource->fd_mutex);
+ kref_get(&resource->sresource_kref);
+ resource->host_shared_handle_nt_reference--;
+ if (resource->host_shared_handle_nt_reference == 0) {
+ if (resource->host_shared_handle_nt.v) {
+ dxgvmb_send_destroy_nt_shared_object(
+ resource->host_shared_handle_nt);
+ dev_dbg(dxgglobaldev,
+ "Resource host_handle_nt destroyed: %x",
+ resource->host_shared_handle_nt.v);
+ resource->host_shared_handle_nt.v = 0;
+ }
+ kref_put(&resource->sresource_kref, dxgsharedresource_destroy);
+ }
+ mutex_unlock(&resource->fd_mutex);
+ kref_put(&resource->sresource_kref, dxgsharedresource_destroy);
+ return 0;
+}
+
+static const struct file_operations dxg_resource_fops = {
+ .release = dxgsharedresource_release,
+};
+
+static int dxgk_open_adapter_from_luid(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_openadapterfromluid args;
+ int ret;
+ struct dxgadapter *entry;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmt_openadapterfromluid *__user result = inargs;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+		pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+ dxgglobal_acquire_process_adapter_lock();
+
+ list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (dxgadapter_acquire_lock_shared(entry) == 0) {
+ dev_dbg(dxgglobaldev, "Compare luids: %d:%d %d:%d",
+ entry->luid.b, entry->luid.a,
+ args.adapter_luid.b, args.adapter_luid.a);
+ if (*(u64 *) &entry->luid ==
+ *(u64 *) &args.adapter_luid) {
+ ret =
+ dxgprocess_open_adapter(process, entry,
+ &args.adapter_handle);
+
+ if (ret >= 0) {
+ ret = copy_to_user(
+ &result->adapter_handle,
+ &args.adapter_handle,
+ sizeof(struct d3dkmthandle));
+ if (ret)
+ ret = -EINVAL;
+ }
+ adapter = entry;
+ }
+ dxgadapter_release_lock_shared(entry);
+ if (adapter)
+ break;
+ }
+ }
+
+ dxgglobal_release_process_adapter_lock();
+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+
+ if (args.adapter_handle.v == 0)
+ ret = -EINVAL;
+
+cleanup:
+
+ if (ret < 0)
+ dxgprocess_close_adapter(process, args.adapter_handle);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int dxgk_query_statistics(struct dxgprocess *process,
+ void __user *inargs)
+{
+ struct d3dkmt_querystatistics *args;
+ int ret;
+ struct dxgadapter *entry;
+ struct dxgadapter *adapter = NULL;
+ struct winluid tmp;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ args = vzalloc(sizeof(struct d3dkmt_querystatistics));
+ if (args == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = copy_from_user(args, inargs, sizeof(*args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+ list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (dxgadapter_acquire_lock_shared(entry) == 0) {
+ if (*(u64 *) &entry->luid ==
+ *(u64 *) &args->adapter_luid) {
+ adapter = entry;
+ break;
+ }
+ dxgadapter_release_lock_shared(entry);
+ }
+ }
+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+ if (adapter) {
+ tmp = args->adapter_luid;
+ args->adapter_luid = adapter->host_adapter_luid;
+ ret = dxgvmb_send_query_statistics(process, adapter, args);
+ if (ret >= 0) {
+ args->adapter_luid = tmp;
+ ret = copy_to_user(inargs, args, sizeof(*args));
+ if (ret) {
+ pr_err("%s failed to copy args", __func__);
+ ret = -EINVAL;
+ }
+ }
+ dxgadapter_release_lock_shared(adapter);
+ }
+
+cleanup:
+ if (args)
+ vfree(args);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgkp_enum_adapters(struct dxgprocess *process,
+ union d3dkmt_enumadapters_filter filter,
+ u32 adapter_count_max,
+ struct d3dkmt_adapterinfo *__user info_out,
+ u32 * __user adapter_count_out)
+{
+ int ret = 0;
+ struct dxgadapter *entry;
+ struct d3dkmt_adapterinfo *info = NULL;
+ struct dxgadapter **adapters = NULL;
+ int adapter_count = 0;
+ int i;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ if (info_out == NULL || adapter_count_max == 0) {
+ dev_dbg(dxgglobaldev, "buffer is NULL");
+ ret = copy_to_user(adapter_count_out,
+ &dxgglobal->num_adapters, sizeof(u32));
+ if (ret) {
+			pr_err("%s copy_to_user failed", __func__);
+ ret = -EINVAL;
+ }
+ goto cleanup;
+ }
+
+ if (adapter_count_max > 0xFFFF) {
+ pr_err("too many adapters");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ info = vzalloc(sizeof(struct d3dkmt_adapterinfo) * adapter_count_max);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ adapters = vzalloc(sizeof(struct dxgadapter *) * adapter_count_max);
+ if (adapters == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+ dxgglobal_acquire_process_adapter_lock();
+
+ list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (dxgadapter_acquire_lock_shared(entry) == 0) {
+ struct d3dkmt_adapterinfo *inf = &info[adapter_count];
+
+ ret = dxgprocess_open_adapter(process, entry,
+ &inf->adapter_handle);
+ if (ret >= 0) {
+ inf->adapter_luid = entry->luid;
+ adapters[adapter_count] = entry;
+ dev_dbg(dxgglobaldev, "adapter: %x %x:%x",
+ inf->adapter_handle.v,
+ inf->adapter_luid.b,
+ inf->adapter_luid.a);
+ adapter_count++;
+ }
+ dxgadapter_release_lock_shared(entry);
+ }
+ if (ret < 0)
+ break;
+ }
+
+ dxgglobal_release_process_adapter_lock();
+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+
+ if (adapter_count > adapter_count_max) {
+ ret = STATUS_BUFFER_TOO_SMALL;
+ dev_dbg(dxgglobaldev, "Too many adapters");
+ ret = copy_to_user(adapter_count_out,
+ &dxgglobal->num_adapters, sizeof(u32));
+ if (ret) {
+ pr_err("%s copy_to_user failed", __func__);
+ ret = -EINVAL;
+ }
+ goto cleanup;
+ }
+
+ ret = copy_to_user(adapter_count_out, &adapter_count,
+ sizeof(adapter_count));
+ if (ret) {
+ pr_err("%s failed to copy adapter_count", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(info_out, info, sizeof(info[0]) * adapter_count);
+ if (ret) {
+ pr_err("%s failed to copy adapter info", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (ret >= 0) {
+ dev_dbg(dxgglobaldev, "found %d adapters", adapter_count);
+ goto success;
+ }
+ if (info) {
+ for (i = 0; i < adapter_count; i++)
+ dxgprocess_close_adapter(process,
+ info[i].adapter_handle);
+ }
+success:
+ if (info)
+ vfree(info);
+ if (adapters)
+ vfree(adapters);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
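+/*
+ * Sealing a shared resource collects the private driver data of every
+ * allocation in its first resource into one contiguous buffer, so the
+ * data can be replayed when the resource is opened by another process.
+ * Once sealed, no further allocations may be added to the resource.
+ */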
+static int dxgsharedresource_seal(struct dxgsharedresource *shared_resource)
+{
+ int ret = 0;
+ int i = 0;
+ u8 *private_data;
+ u32 data_size;
+ struct dxgresource *resource;
+ struct dxgallocation *alloc;
+
+ dev_dbg(dxgglobaldev, "Sealing resource: %p", shared_resource);
+
+ down_write(&shared_resource->adapter->shared_resource_list_lock);
+ if (shared_resource->sealed) {
+ dev_dbg(dxgglobaldev, "Resource already sealed");
+ goto cleanup;
+ }
+ shared_resource->sealed = 1;
+ if (!list_empty(&shared_resource->resource_list_head)) {
+ resource =
+ list_first_entry(&shared_resource->resource_list_head,
+ struct dxgresource,
+ shared_resource_list_entry);
+ dev_dbg(dxgglobaldev, "First resource: %p", resource);
+ mutex_lock(&resource->resource_mutex);
+ list_for_each_entry(alloc, &resource->alloc_list_head,
+ alloc_list_entry) {
+ dev_dbg(dxgglobaldev, "Resource alloc: %p %d", alloc,
+ alloc->priv_drv_data->data_size);
+ shared_resource->allocation_count++;
+ shared_resource->alloc_private_data_size +=
+ alloc->priv_drv_data->data_size;
+ if (shared_resource->alloc_private_data_size <
+ alloc->priv_drv_data->data_size) {
+ pr_err("alloc private data overflow");
+ ret = -EINVAL;
+ goto cleanup1;
+ }
+ }
+ if (shared_resource->alloc_private_data_size == 0) {
+ ret = -EINVAL;
+ goto cleanup1;
+ }
+ shared_resource->alloc_private_data =
+ vzalloc(shared_resource->alloc_private_data_size);
+ if (shared_resource->alloc_private_data == NULL) {
+ ret = -EINVAL;
+ goto cleanup1;
+ }
+ shared_resource->alloc_private_data_sizes =
+ vzalloc(sizeof(u32)*shared_resource->allocation_count);
+ if (shared_resource->alloc_private_data_sizes == NULL) {
+ ret = -EINVAL;
+ goto cleanup1;
+ }
+ private_data = shared_resource->alloc_private_data;
+ data_size = shared_resource->alloc_private_data_size;
+ i = 0;
+ list_for_each_entry(alloc, &resource->alloc_list_head,
+ alloc_list_entry) {
+ u32 alloc_data_size = alloc->priv_drv_data->data_size;
+
+ if (alloc_data_size) {
+ if (data_size < alloc_data_size) {
+ pr_err("Invalid private data size");
+ ret = -EINVAL;
+ goto cleanup1;
+ }
+ shared_resource->alloc_private_data_sizes[i] =
+ alloc_data_size;
+ memcpy(private_data,
+ alloc->priv_drv_data->data,
+ alloc_data_size);
+ vfree(alloc->priv_drv_data);
+ alloc->priv_drv_data = NULL;
+ private_data += alloc_data_size;
+ data_size -= alloc_data_size;
+ }
+ i++;
+ }
+ if (data_size != 0) {
+ pr_err("Data size mismatch");
+ ret = -EINVAL;
+ }
+cleanup1:
+ mutex_unlock(&resource->resource_mutex);
+ }
+cleanup:
+ up_write(&shared_resource->adapter->shared_resource_list_lock);
+ return ret;
+}
+
+static int
+dxgk_enum_adapters(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_enumadapters2 args;
+ int ret;
+ struct dxgadapter *entry;
+ struct d3dkmt_adapterinfo *info = NULL;
+ struct dxgadapter **adapters = NULL;
+ int adapter_count = 0;
+ int i;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.adapters == NULL) {
+ dev_dbg(dxgglobaldev, "buffer is NULL");
+ args.num_adapters = dxgglobal->num_adapters;
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy args to user", __func__);
+ ret = -EINVAL;
+ }
+ goto cleanup;
+ }
+ if (args.num_adapters < dxgglobal->num_adapters) {
+ args.num_adapters = dxgglobal->num_adapters;
+ dev_dbg(dxgglobaldev, "buffer is too small");
+ ret = -EOVERFLOW;
+ goto cleanup;
+ }
+
+ if (args.num_adapters > D3DKMT_ADAPTERS_MAX) {
+ dev_dbg(dxgglobaldev, "too many adapters");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ info = vzalloc(sizeof(struct d3dkmt_adapterinfo) * args.num_adapters);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ adapters = vzalloc(sizeof(struct dxgadapter *) * args.num_adapters);
+ if (adapters == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED);
+ dxgglobal_acquire_process_adapter_lock();
+
+ list_for_each_entry(entry, &dxgglobal->adapter_list_head,
+ adapter_list_entry) {
+ if (dxgadapter_acquire_lock_shared(entry) == 0) {
+ struct d3dkmt_adapterinfo *inf = &info[adapter_count];
+
+ ret = dxgprocess_open_adapter(process, entry,
+ &inf->adapter_handle);
+ if (ret >= 0) {
+ inf->adapter_luid = entry->luid;
+ adapters[adapter_count] = entry;
+ dev_dbg(dxgglobaldev, "adapter: %x %llx",
+ inf->adapter_handle.v,
+ *(u64 *) &inf->adapter_luid);
+ adapter_count++;
+ }
+ dxgadapter_release_lock_shared(entry);
+ }
+ if (ret < 0)
+ break;
+ }
+
+ dxgglobal_release_process_adapter_lock();
+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED);
+
+ args.num_adapters = adapter_count;
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy args to user", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_to_user(args.adapters, info,
+ sizeof(info[0]) * args.num_adapters);
+ if (ret) {
+ pr_err("%s failed to copy adapter info to user", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ if (info) {
+ for (i = 0; i < args.num_adapters; i++) {
+ dxgprocess_close_adapter(process,
+ info[i].adapter_handle);
+ }
+ }
+ } else {
+ dev_dbg(dxgglobaldev, "found %d adapters", args.num_adapters);
+ }
+
+ if (info)
+ vfree(info);
+ if (adapters)
+ vfree(adapters);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_enum_adapters3(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_enumadapters3 args;
+ int ret;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgkp_enum_adapters(process, args.filter,
+ args.adapter_count,
+ args.adapters,
+ &((struct d3dkmt_enumadapters3 *)inargs)->
+ adapter_count);
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_close_adapter(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmthandle args;
+ int ret;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgprocess_close_adapter(process, args);
+ if (ret < 0)
+ pr_err("%s failed", __func__);
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_query_adapter_info(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_queryadapterinfo args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.private_data_size > DXG_MAX_VM_BUS_PACKET_SIZE ||
+ args.private_data_size == 0) {
+ pr_err("invalid private data size");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dev_dbg(dxgglobaldev, "Type: %d Size: %x",
+ args.type, args.private_data_size);
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_query_adapter_info(process, adapter, &args);
+
+ dxgadapter_release_lock_shared(adapter);
+
+cleanup:
+
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_create_device(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createdevice args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct d3dkmthandle host_device_handle = {};
+ bool adapter_locked = false;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* The call acquires reference on the adapter */
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgdevice_create(adapter, process);
+ if (device == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0)
+ goto cleanup;
+
+ adapter_locked = true;
+
+ host_device_handle = dxgvmb_send_create_device(adapter, process, &args);
+ if (host_device_handle.v) {
+ ret = copy_to_user(&((struct d3dkmt_createdevice *)inargs)->
+ device, &host_device_handle,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy device handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, device,
+ HMGRENTRY_TYPE_DXGDEVICE,
+ host_device_handle);
+ if (ret >= 0) {
+ device->handle = host_device_handle;
+ device->handle_valid = 1;
+ device->object_state = DXGOBJECTSTATE_ACTIVE;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ if (host_device_handle.v)
+ dxgvmb_send_destroy_device(adapter, process,
+ host_device_handle);
+ if (device)
+ dxgdevice_destroy(device);
+ }
+
+ if (adapter_locked)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_destroy_device(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_destroydevice args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ device = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGDEVICE,
+ args.device, true);
+ if (device) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGDEVICE, args.device);
+ device->handle_valid = 0;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (device == NULL) {
+ pr_err("invalid device handle: %x", args.device.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+
+ dxgdevice_destroy(device);
+
+ if (dxgadapter_acquire_lock_shared(adapter) == 0) {
+ dxgvmb_send_destroy_device(adapter, process, args.device);
+ dxgadapter_release_lock_shared(adapter);
+ }
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_create_context_virtual(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createcontextvirtual args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxgcontext *context = NULL;
+ struct d3dkmthandle host_context_handle = {};
+ bool device_lock_acquired = false;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ context = dxgcontext_create(device);
+ if (context == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ host_context_handle = dxgvmb_send_create_context(adapter,
+ process, &args);
+ if (host_context_handle.v) {
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, context,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ host_context_handle);
+ if (ret >= 0)
+ context->handle = host_context_handle;
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ if (ret < 0)
+ goto cleanup;
+ ret = copy_to_user(&((struct d3dkmt_createcontextvirtual *)
+ inargs)->context, &host_context_handle,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy context handle", __func__);
+ ret = -EINVAL;
+ }
+ } else {
+ pr_err("invalid host handle");
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ if (host_context_handle.v) {
+ dxgvmb_send_destroy_context(adapter, process,
+ host_context_handle);
+ }
+ if (context)
+ dxgcontext_destroy_safe(process, context);
+ }
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device) {
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_destroy_context(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_destroycontext args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgcontext *context = NULL;
+ struct dxgdevice *device = NULL;
+ struct d3dkmthandle device_handle = {};
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ context = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context, true);
+ if (context) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGCONTEXT, args.context);
+ context->handle.v = 0;
+ device_handle = context->device_handle;
+ context->object_state = DXGOBJECTSTATE_DESTROYED;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (context == NULL) {
+ pr_err("invalid context handle: %x", args.context.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, device_handle);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_destroy_context(adapter, process, args.context);
+
+ dxgcontext_destroy_safe(process, context);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int dxgk_create_hwcontext(struct dxgprocess *process,
+ void *__user inargs)
+{
+	/* This is an obsolete entry point */
+ return -ENOTTY;
+}
+
+static int dxgk_destroy_hwcontext(struct dxgprocess *process,
+ void *__user inargs)
+{
+	/* This is an obsolete entry point */
+ return -ENOTTY;
+}
+
+static int
+dxgk_create_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createhwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgcontext *context = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct dxghwqueue *hwqueue = NULL;
+ int ret;
+ bool device_lock_acquired = false;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+ context = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context, true);
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+ if (context == NULL) {
+ pr_err("Invalid context handle %x", args.context.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hwqueue = dxghwqueue_create(context);
+ if (hwqueue == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_create_hwqueue(process, adapter, &args,
+ inargs, hwqueue);
+
+cleanup:
+
+ if (ret < 0 && hwqueue)
+ dxghwqueue_destroy(process, hwqueue);
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int dxgk_destroy_hwqueue(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_destroyhwqueue args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxghwqueue *hwqueue = NULL;
+ struct d3dkmthandle device_handle = {};
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ hwqueue = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ args.queue, true);
+ if (hwqueue) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE, args.queue);
+ hwqueue->handle.v = 0;
+ device_handle = hwqueue->device_handle;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (hwqueue == NULL) {
+ pr_err("invalid hwqueue handle: %x", args.queue.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, device_handle);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_destroy_hwqueue(process, adapter, args.queue);
+
+ dxghwqueue_destroy(process, hwqueue);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_create_paging_queue(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createpagingqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct dxgpagingqueue *pqueue = NULL;
+ int ret;
+ struct d3dkmthandle host_handle = {};
+ bool device_lock_acquired = false;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+ adapter = device->adapter;
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ pqueue = dxgpagingqueue_create(device);
+ if (pqueue == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_create_paging_queue(process, device, &args, pqueue);
+ if (ret >= 0) {
+ host_handle = args.paging_queue;
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, pqueue,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ host_handle);
+ if (ret >= 0) {
+ pqueue->handle = host_handle;
+ ret = hmgrtable_assign_handle(&process->handle_table,
+ NULL,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ args.sync_object);
+ if (ret >= 0)
+ pqueue->syncobj_handle = args.sync_object;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ /* should not fail after this */
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ if (pqueue)
+ dxgpagingqueue_destroy(pqueue);
+ if (host_handle.v)
+ dxgvmb_send_destroy_paging_queue(process,
+ adapter,
+ host_handle);
+ }
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device) {
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_destroy_paging_queue(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dddi_destroypagingqueue args;
+ struct dxgpagingqueue *paging_queue = NULL;
+ int ret;
+ struct d3dkmthandle device_handle = {};
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ paging_queue = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.paging_queue, true);
+ if (paging_queue) {
+ device_handle = paging_queue->device_handle;
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.paging_queue);
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ paging_queue->syncobj_handle);
+ paging_queue->syncobj_handle.v = 0;
+ paging_queue->handle.v = 0;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ if (device_handle.v)
+ device = dxgprocess_device_by_handle(process, device_handle);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0) {
+ kref_put(&device->device_kref, dxgdevice_release);
+ device = NULL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_destroy_paging_queue(process, adapter,
+ args.paging_queue);
+
+ dxgpagingqueue_destroy(paging_queue);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device) {
+ dxgdevice_release_lock_shared(device);
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
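+/*
+ * Query the host for standard allocation private driver data in two passes:
+ * the first call with NULL buffers returns the required sizes, the second
+ * fills the buffers allocated here. On success the caller owns the returned
+ * buffers and must vfree() them.
+ */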
+static int
+get_standard_alloc_priv_data(struct dxgdevice *device,
+ struct d3dkmt_createstandardallocation *alloc_info,
+ u32 *standard_alloc_priv_data_size,
+ void **standard_alloc_priv_data,
+ u32 *standard_res_priv_data_size,
+ void **standard_res_priv_data)
+{
+ int ret;
+ struct d3dkmdt_gdisurfacedata gdi_data = { };
+ u32 priv_data_size = 0;
+ u32 res_priv_data_size = 0;
+ void *priv_data = NULL;
+ void *res_priv_data = NULL;
+
+ gdi_data.type = _D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER;
+ gdi_data.width = alloc_info->existing_heap_data.size;
+ gdi_data.height = 1;
+ gdi_data.format = _D3DDDIFMT_UNKNOWN;
+
+ *standard_alloc_priv_data_size = 0;
+ ret = dxgvmb_send_get_stdalloc_data(device,
+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE,
+ &gdi_data, 0,
+ &priv_data_size, NULL,
+ &res_priv_data_size,
+ NULL);
+ if (ret < 0)
+ goto cleanup;
+ dev_dbg(dxgglobaldev, "Priv data size: %d", priv_data_size);
+ if (priv_data_size == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ priv_data = vzalloc(priv_data_size);
+ if (priv_data == NULL) {
+ ret = -ENOMEM;
+ pr_err("failed to allocate memory for priv data: %d",
+ priv_data_size);
+ goto cleanup;
+ }
+ if (res_priv_data_size) {
+ res_priv_data = vzalloc(res_priv_data_size);
+ if (res_priv_data == NULL) {
+ ret = -ENOMEM;
+ pr_err("failed to alloc memory for res priv data: %d",
+ res_priv_data_size);
+ goto cleanup;
+ }
+ }
+ ret = dxgvmb_send_get_stdalloc_data(device,
+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE,
+ &gdi_data, 0,
+ &priv_data_size,
+ priv_data,
+ &res_priv_data_size,
+ res_priv_data);
+ if (ret < 0)
+ goto cleanup;
+ *standard_alloc_priv_data_size = priv_data_size;
+ *standard_alloc_priv_data = priv_data;
+ *standard_res_priv_data_size = res_priv_data_size;
+ *standard_res_priv_data = res_priv_data;
+ priv_data = NULL;
+ res_priv_data = NULL;
+
+cleanup:
+ if (priv_data)
+ vfree(priv_data);
+ if (res_priv_data)
+ vfree(res_priv_data);
+ if (ret)
+ dev_dbg(dxgglobaldev, "err: %s %d", __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_create_allocation(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createallocation args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct d3dddi_allocationinfo *alloc_info = NULL;
+ struct d3dkmt_createstandardallocation standard_alloc;
+ u32 alloc_info_size = 0;
+ struct dxgresource *resource = NULL;
+ struct dxgallocation **dxgalloc = NULL;
+ struct dxgsharedresource *shared_resource = NULL;
+ bool resource_mutex_acquired = false;
+ u32 standard_alloc_priv_data_size = 0;
+ void *standard_alloc_priv_data = NULL;
+ u32 res_priv_data_size = 0;
+ void *res_priv_data = NULL;
+ int i;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX ||
+ args.alloc_count == 0) {
+ pr_err("invalid number of allocations to create");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ alloc_info_size = sizeof(struct d3dddi_allocationinfo) *
+ args.alloc_count;
+ alloc_info = vzalloc(alloc_info_size);
+ if (alloc_info == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(alloc_info, args.allocation_info,
+ alloc_info_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc info", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ for (i = 0; i < args.alloc_count; i++) {
+ if (args.flags.standard_allocation) {
+ if (alloc_info[i].priv_drv_data_size != 0) {
+ pr_err("private data size is not zero");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ if (alloc_info[i].priv_drv_data_size >=
+ DXG_MAX_VM_BUS_PACKET_SIZE) {
+			pr_err("private data size is too big: %d %d %zu",
+ i, alloc_info[i].priv_drv_data_size,
+ sizeof(alloc_info[0]));
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ if (args.flags.existing_section || args.flags.create_protected) {
+ pr_err("invalid allocation flags");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.flags.standard_allocation) {
+ if (args.standard_allocation == NULL) {
+ pr_err("invalid standard allocation");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ ret = copy_from_user(&standard_alloc,
+ args.standard_allocation,
+ sizeof(standard_alloc));
+ if (ret) {
+ pr_err("%s failed to copy std alloc data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (standard_alloc.type ==
+ _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP) {
+ if (alloc_info[0].sysmem == NULL ||
+ (unsigned long)alloc_info[0].sysmem &
+ (PAGE_SIZE - 1)) {
+ pr_err("invalid sysmem pointer");
+ ret = STATUS_INVALID_PARAMETER;
+ goto cleanup;
+ }
+ if (!args.flags.existing_sysmem) {
+ pr_err("expected existing_sysmem flag");
+ ret = STATUS_INVALID_PARAMETER;
+ goto cleanup;
+ }
+ } else if (standard_alloc.type ==
+ _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER) {
+ if (args.flags.existing_sysmem) {
+ pr_err("existing_sysmem flag is invalid");
+ ret = STATUS_INVALID_PARAMETER;
+ goto cleanup;
+ }
+ if (alloc_info[0].sysmem != NULL) {
+ pr_err("sysmem should be NULL");
+ ret = STATUS_INVALID_PARAMETER;
+ goto cleanup;
+ }
+ } else {
+ pr_err("invalid standard allocation type");
+ ret = STATUS_INVALID_PARAMETER;
+ goto cleanup;
+ }
+
+ if (args.priv_drv_data_size != 0 ||
+ args.alloc_count != 1 ||
+ standard_alloc.existing_heap_data.size == 0 ||
+ standard_alloc.existing_heap_data.size & (PAGE_SIZE - 1)) {
+ pr_err("invalid standard allocation");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ args.priv_drv_data_size =
+ sizeof(struct d3dkmt_createstandardallocation);
+ }
+
+ if (args.flags.create_shared && !args.flags.create_resource) {
+ pr_err("create_resource must be set for create_shared");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0) {
+ kref_put(&device->device_kref, dxgdevice_release);
+ device = NULL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ if (args.flags.standard_allocation) {
+ ret = get_standard_alloc_priv_data(device,
+ &standard_alloc,
+ &standard_alloc_priv_data_size,
+ &standard_alloc_priv_data,
+ &res_priv_data_size,
+ &res_priv_data);
+ if (ret < 0)
+ goto cleanup;
+ dev_dbg(dxgglobaldev, "Alloc private data: %d",
+ standard_alloc_priv_data_size);
+ }
+
+ if (args.flags.create_resource) {
+ resource = dxgresource_create(device);
+ if (resource == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ resource->private_runtime_handle =
+ args.private_runtime_resource_handle;
+ if (args.flags.create_shared) {
+ if (!args.flags.nt_security_sharing) {
+ dev_err(dxgglobaldev,
+ "%s: nt_security_sharing must be set",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ shared_resource = dxgsharedresource_create(adapter);
+ if (shared_resource == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ shared_resource->runtime_private_data_size =
+ args.private_runtime_data_size;
+ shared_resource->resource_private_data_size =
+ args.priv_drv_data_size;
+ dxgsharedresource_add_resource(shared_resource,
+ resource);
+ if (args.flags.standard_allocation) {
+ shared_resource->resource_private_data =
+ res_priv_data;
+ shared_resource->resource_private_data_size =
+ res_priv_data_size;
+ res_priv_data = NULL;
+ }
+ if (args.private_runtime_data_size) {
+ shared_resource->runtime_private_data =
+ vzalloc(args.private_runtime_data_size);
+ if (shared_resource->runtime_private_data ==
+ NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(
+ shared_resource->runtime_private_data,
+ args.private_runtime_data,
+ args.private_runtime_data_size);
+ if (ret) {
+ pr_err("%s failed to copy runtime data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ if (args.priv_drv_data_size &&
+ !args.flags.standard_allocation) {
+ shared_resource->resource_private_data =
+ vzalloc(args.priv_drv_data_size);
+ if (shared_resource->resource_private_data ==
+ NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(
+ shared_resource->resource_private_data,
+ args.priv_drv_data,
+ args.priv_drv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy res data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ }
+ } else {
+ if (args.resource.v) {
+ /* Adding new allocations to the given resource */
+
+ dxgprocess_ht_lock_shared_down(process);
+ resource = hmgrtable_get_object_by_type(
+ &process->handle_table,
+ HMGRENTRY_TYPE_DXGRESOURCE,
+ args.resource, true);
+			if (resource)
+				kref_get(&resource->resource_kref);
+ dxgprocess_ht_lock_shared_up(process);
+
+ if (resource == NULL || resource->device != device) {
+ pr_err("invalid resource handle %x",
+ args.resource.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (resource->shared_owner &&
+ resource->shared_owner->sealed) {
+ pr_err("Resource is sealed");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ /* Synchronize with resource destruction */
+ mutex_lock(&resource->resource_mutex);
+ if (!dxgresource_is_active(resource)) {
+ mutex_unlock(&resource->resource_mutex);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ resource_mutex_acquired = true;
+ }
+ }
+
+ dxgalloc = vzalloc(sizeof(struct dxgallocation *) * args.alloc_count);
+ if (dxgalloc == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ for (i = 0; i < args.alloc_count; i++) {
+ struct dxgallocation *alloc;
+ u32 priv_data_size;
+
+ if (args.flags.standard_allocation)
+ priv_data_size = standard_alloc_priv_data_size;
+ else
+ priv_data_size = alloc_info[i].priv_drv_data_size;
+
+ if (alloc_info[i].sysmem && !args.flags.standard_allocation) {
+ if ((unsigned long)
+ alloc_info[i].sysmem & (PAGE_SIZE - 1)) {
+ pr_err("invalid sysmem alloc %d, %p",
+ i, alloc_info[i].sysmem);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ if ((alloc_info[0].sysmem == NULL) !=
+ (alloc_info[i].sysmem == NULL)) {
+ pr_err("All allocations must have sysmem pointer");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dxgalloc[i] = dxgallocation_create(process);
+ if (dxgalloc[i] == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ alloc = dxgalloc[i];
+
+ if (resource) {
+ ret = dxgresource_add_alloc(resource, alloc);
+ if (ret < 0)
+ goto cleanup;
+ } else {
+ dxgdevice_add_alloc(device, alloc);
+ }
+ if (args.flags.create_shared) {
+ /* Remember alloc private data to use it during open */
+ alloc->priv_drv_data = vzalloc(priv_data_size +
+ offsetof(struct privdata, data) - 1);
+ if (alloc->priv_drv_data == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ if (args.flags.standard_allocation) {
+ memcpy(alloc->priv_drv_data->data,
+ standard_alloc_priv_data,
+ standard_alloc_priv_data_size);
+ alloc->priv_drv_data->data_size =
+ standard_alloc_priv_data_size;
+ } else {
+ ret = copy_from_user(
+ alloc->priv_drv_data->data,
+ alloc_info[i].priv_drv_data,
+ priv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy priv data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ alloc->priv_drv_data->data_size =
+ priv_data_size;
+ }
+ }
+ }
+
+ ret = dxgvmb_send_create_allocation(process, device, &args, inargs,
+ resource, dxgalloc, alloc_info,
+ &standard_alloc);
+
+	/*
+	 * Temporary workaround for the lack of output privateData fields
+	 * for shared resources.
+	 */
+ if (ret >= 0 && args.flags.create_shared) {
+ for (i = 0; i < args.alloc_count; i++) {
+ u32 priv_data_size;
+
+ if (args.flags.standard_allocation)
+ priv_data_size = standard_alloc_priv_data_size;
+ else
+ priv_data_size = alloc_info[i].priv_drv_data_size;
+
+ /* Remember dxgalloc[i] private data to use it during open */
+ dxgalloc[i]->priv_drv_data = vzalloc(priv_data_size +
+ offsetof(struct privdata, data) - 1);
+ if (dxgalloc[i]->priv_drv_data == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ if (args.flags.standard_allocation) {
+ memcpy(dxgalloc[i]->priv_drv_data->data,
+ standard_alloc_priv_data,
+ standard_alloc_priv_data_size);
+ dxgalloc[i]->priv_drv_data->data_size =
+ standard_alloc_priv_data_size;
+ } else {
+ ret = copy_from_user(
+ dxgalloc[i]->priv_drv_data->data,
+ alloc_info[i].priv_drv_data,
+ priv_data_size);
+ if (ret) {
+ pr_err("%s failed to copy priv data",
+ __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ dxgalloc[i]->priv_drv_data->data_size =
+ priv_data_size;
+ }
+ }
+ }
+cleanup:
+
+ if (resource_mutex_acquired) {
+ mutex_unlock(&resource->resource_mutex);
+ kref_put(&resource->resource_kref, dxgresource_release);
+ }
+ if (ret < 0) {
+ if (dxgalloc) {
+ for (i = 0; i < args.alloc_count; i++) {
+ if (dxgalloc[i])
+ dxgallocation_destroy(dxgalloc[i]);
+ }
+ }
+ if (resource && args.flags.create_resource) {
+ if (shared_resource) {
+ dxgsharedresource_remove_resource
+ (shared_resource, resource);
+ }
+ dxgresource_destroy(resource);
+ }
+ }
+ if (shared_resource)
+ kref_put(&shared_resource->sresource_kref,
+ dxgsharedresource_destroy);
+ if (dxgalloc)
+ vfree(dxgalloc);
+ if (standard_alloc_priv_data)
+ vfree(standard_alloc_priv_data);
+ if (res_priv_data)
+ vfree(res_priv_data);
+ if (alloc_info)
+ vfree(alloc_info);
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device) {
+ dxgdevice_release_lock_shared(device);
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
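+/*
+ * Validate that an allocation being destroyed belongs to the same resource
+ * and device as the first allocation in the request, and that it is not
+ * part of a shared resource. fail_reason identifies which check rejected
+ * it in the error message.
+ */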
+int validate_alloc(struct dxgallocation *alloc0,
+ struct dxgallocation *alloc,
+ struct dxgdevice *device,
+ struct d3dkmthandle alloc_handle)
+{
+ u32 fail_reason;
+
+ if (alloc == NULL) {
+ fail_reason = 1;
+ goto cleanup;
+ }
+ if (alloc->resource_owner != alloc0->resource_owner) {
+ fail_reason = 2;
+ goto cleanup;
+ }
+ if (alloc->resource_owner) {
+ if (alloc->owner.resource != alloc0->owner.resource) {
+ fail_reason = 3;
+ goto cleanup;
+ }
+ if (alloc->owner.resource->device != device) {
+ fail_reason = 4;
+ goto cleanup;
+ }
+ if (alloc->owner.resource->shared_owner) {
+ fail_reason = 5;
+ goto cleanup;
+ }
+ } else {
+ if (alloc->owner.device != device) {
+ fail_reason = 6;
+ goto cleanup;
+ }
+ }
+ return 0;
+cleanup:
+ pr_err("Alloc validation failed: reason: %d %x",
+ fail_reason, alloc_handle.v);
+ return -EINVAL;
+}
+
+static int
+dxgk_destroy_allocation(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_destroyallocation2 args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int ret;
+ struct d3dkmthandle *alloc_handles = NULL;
+ struct dxgallocation **allocs = NULL;
+ struct dxgresource *resource = NULL;
+ int i;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX ||
+ ((args.alloc_count == 0) == (args.resource.v == 0))) {
+ pr_err("invalid number of allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.alloc_count) {
+ u32 handle_size = sizeof(struct d3dkmthandle) *
+ args.alloc_count;
+
+ alloc_handles = vzalloc(handle_size);
+ if (alloc_handles == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ allocs = vzalloc(sizeof(struct dxgallocation *) *
+ args.alloc_count);
+ if (allocs == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(alloc_handles, args.allocations,
+ handle_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+	/* Acquire the device lock to synchronize with the device destruction */
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0) {
+ kref_put(&device->device_kref, dxgdevice_release);
+ device = NULL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ /*
+ * Destroy the local allocation handles first. If the host handle
+ * is destroyed first, another object could be assigned to the process
+ * table at the same place as the allocation handle and it will fail.
+ */
+ if (args.alloc_count) {
+ dxgprocess_ht_lock_exclusive_down(process);
+ for (i = 0; i < args.alloc_count; i++) {
+ allocs[i] =
+ hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ alloc_handles[i], true);
+ ret =
+ validate_alloc(allocs[0], allocs[i], device,
+ alloc_handles[i]);
+ if (ret < 0) {
+ dxgprocess_ht_lock_exclusive_up(process);
+ goto cleanup;
+ }
+ }
+ dxgprocess_ht_lock_exclusive_up(process);
+ for (i = 0; i < args.alloc_count; i++)
+ dxgallocation_free_handle(allocs[i]);
+ } else {
+ struct dxgallocation *alloc;
+
+ dxgprocess_ht_lock_exclusive_down(process);
+ resource = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGRESOURCE,
+ args.resource, true);
+ if (resource == NULL) {
+ pr_err("Invalid resource handle: %x",
+ args.resource.v);
+ ret = -EINVAL;
+ } else if (resource->device != device) {
+ pr_err("Resource belongs to wrong device: %x",
+ args.resource.v);
+ ret = -EINVAL;
+ } else {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGRESOURCE,
+ args.resource);
+ resource->object_state = DXGOBJECTSTATE_DESTROYED;
+ resource->handle.v = 0;
+ resource->handle_valid = 0;
+ }
+ dxgprocess_ht_lock_exclusive_up(process);
+
+ if (ret < 0)
+ goto cleanup;
+
+ dxgdevice_acquire_alloc_list_lock_shared(device);
+ list_for_each_entry(alloc, &resource->alloc_list_head,
+ alloc_list_entry) {
+ dxgallocation_free_handle(alloc);
+ }
+ dxgdevice_release_alloc_list_lock_shared(device);
+ }
+
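+	/* Pin the owning resource while the host destroys its allocations */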
+ if (args.alloc_count && allocs[0]->resource_owner)
+ resource = allocs[0]->owner.resource;
+
+ if (resource) {
+ kref_get(&resource->resource_kref);
+ mutex_lock(&resource->resource_mutex);
+ }
+
+ ret = dxgvmb_send_destroy_allocation(process, device, &args,
+ alloc_handles);
+
+ /*
+	 * Destroy the allocations after the host has destroyed them.
+ * The allocation gpadl teardown will wait until the host unmaps its
+ * gpadl.
+ */
+ dxgdevice_acquire_alloc_list_lock(device);
+ if (args.alloc_count) {
+ for (i = 0; i < args.alloc_count; i++) {
+ if (allocs[i]) {
+ allocs[i]->alloc_handle.v = 0;
+ dxgallocation_destroy(allocs[i]);
+ }
+ }
+ } else {
+ dxgresource_destroy(resource);
+ }
+ dxgdevice_release_alloc_list_lock(device);
+
+ if (resource) {
+ mutex_unlock(&resource->resource_mutex);
+ kref_put(&resource->resource_kref, dxgresource_release);
+ }
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device) {
+ dxgdevice_release_lock_shared(device);
+ kref_put(&device->device_kref, dxgdevice_release);
+ }
+
+ if (alloc_handles)
+ vfree(alloc_handles);
+
+ if (allocs)
+ vfree(allocs);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_make_resident(struct dxgprocess *process, void *__user inargs)
+{
+ int ret, ret2;
+ struct d3dddi_makeresident args;
+	struct d3dddi_makeresident *__user input = inargs;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.alloc_count > D3DKMT_MAKERESIDENT_ALLOC_MAX ||
+ args.alloc_count == 0) {
+ pr_err("invalid number of allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (args.paging_queue.v == 0) {
+ pr_err("paging queue is missing");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.paging_queue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_make_resident(process, adapter, &args);
+ if (ret < 0)
+ goto cleanup;
+	/* STATUS_PENDING is a success code > 0. It is returned to user mode */
+ if (!(ret == STATUS_PENDING || ret == 0)) {
+ pr_err("%s Unexpected error %x", __func__, ret);
+ goto cleanup;
+ }
+
+ ret2 = copy_to_user(&input->paging_fence_value,
+ &args.paging_fence_value, sizeof(u64));
+ if (ret2) {
+ pr_err("%s failed to copy paging fence", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret2 = copy_to_user(&input->num_bytes_to_trim,
+ &args.num_bytes_to_trim, sizeof(u64));
+ if (ret2) {
+ pr_err("%s failed to copy bytes to trim", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+
+ return ret;
+}
+
+static int
+dxgk_evict(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_evict args;
+	struct d3dkmt_evict *__user input = inargs;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.alloc_count > D3DKMT_MAKERESIDENT_ALLOC_MAX ||
+ args.alloc_count == 0) {
+ pr_err("invalid number of allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_evict(process, adapter, &args);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(&input->num_bytes_to_trim,
+ &args.num_bytes_to_trim, sizeof(u64));
+ if (ret) {
+ pr_err("%s failed to copy bytes to trim to user", __func__);
+ ret = -EINVAL;
+ }
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_offer_allocations(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_offerallocations args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.allocation_count > D3DKMT_MAKERESIDENT_ALLOC_MAX ||
+ args.allocation_count == 0) {
+ pr_err("invalid number of allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if ((args.resources == NULL) == (args.allocations == NULL)) {
+ pr_err("invalid pointer to resources/allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_offer_allocations(process, adapter, &args);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_reclaim_allocations(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_reclaimallocations2 args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmt_reclaimallocations2 * __user in_args = inargs;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.allocation_count > D3DKMT_MAKERESIDENT_ALLOC_MAX ||
+ args.allocation_count == 0) {
+ pr_err("invalid number of allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if ((args.resources == NULL) == (args.allocations == NULL)) {
+ pr_err("invalid pointer to resources/allocations");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.paging_queue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_reclaim_allocations(process, adapter,
+ device->handle, &args,
+ &in_args->paging_fence_value);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_submit_command(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_submitcommand args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.broadcast_context_count > D3DDDI_MAX_BROADCAST_CONTEXT ||
+ args.broadcast_context_count == 0) {
+ pr_err("invalid number of contexts");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("invalid private data size");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.num_history_buffers > 1024) {
+ pr_err("invalid number of history buffers");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("invalid number of primaries");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.broadcast_context[0]);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_submit_command(process, adapter, &args);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_submit_command_to_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_submitcommandtohwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("invalid private data size");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("invalid number of primaries");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ args.hwqueue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_submit_command_hwqueue(process, adapter, &args);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_submitsignalsyncobjectstohwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmthandle hwqueue = {};
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.hwqueue_count > D3DDDI_MAX_BROADCAST_CONTEXT ||
+ args.hwqueue_count == 0) {
+ pr_err("invalid hwqueue count");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > D3DDDI_MAX_OBJECT_SIGNALED ||
+ args.object_count == 0) {
+		pr_err("invalid number of sync objects");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
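+	/* Use the first hwqueue handle to find the owning device */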
+ ret = copy_from_user(&hwqueue, args.hwqueues,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy hwqueue handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ hwqueue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_signal_sync_object(process, adapter,
+ args.flags, 0, zerohandle,
+ args.object_count, args.objects,
+ args.hwqueue_count, args.hwqueues,
+ args.object_count,
+ args.fence_values, NULL,
+ zerohandle);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_submitwaitforsyncobjectstohwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int ret;
+ struct d3dkmthandle *objects = NULL;
+ u32 object_size;
+ u64 *fences = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON ||
+ args.object_count == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ object_size = sizeof(struct d3dkmthandle) * args.object_count;
+ objects = vzalloc(object_size);
+ if (objects == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(objects, args.objects, object_size);
+ if (ret) {
+ pr_err("%s failed to copy objects", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ object_size = sizeof(u64) * args.object_count;
+ fences = vzalloc(object_size);
+ if (fences == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(fences, args.fence_values, object_size);
+ if (ret) {
+ pr_err("%s failed to copy fence values", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ args.hwqueue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter,
+ args.hwqueue, args.object_count,
+ objects, fences, false);
+
+cleanup:
+
+ if (objects)
+ vfree(objects);
+ if (fences)
+ vfree(fences);
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_map_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+ int ret, ret2;
+ struct d3dddi_mapgpuvirtualaddress args;
+	struct d3dddi_mapgpuvirtualaddress *__user input = inargs;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.paging_queue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_map_gpu_va(process, zerohandle, adapter, &args);
+ if (ret < 0)
+ goto cleanup;
+	/* STATUS_PENDING is a success code > 0. It is returned to user mode */
+ if (!(ret == STATUS_PENDING || ret == 0)) {
+ pr_err("%s Unexpected error %x", __func__, ret);
+ goto cleanup;
+ }
+
+ ret2 = copy_to_user(&input->paging_fence_value,
+ &args.paging_fence_value, sizeof(u64));
+ if (ret2) {
+ pr_err("%s failed to copy paging fence to user", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret2 = copy_to_user(&input->virtual_address, &args.virtual_address,
+ sizeof(args.virtual_address));
+ if (ret2) {
+ pr_err("%s failed to copy va to user", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_reserve_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dddi_reservegpuvirtualaddress args;
+	struct d3dddi_reservegpuvirtualaddress *__user input = inargs;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
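+	/* args.adapter may hold an adapter handle or a paging queue handle */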
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, false);
+ if (adapter == NULL) {
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.adapter);
+ if (device == NULL) {
+ pr_err("invalid adapter or paging queue: 0x%x",
+ args.adapter.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ kref_get(&adapter->adapter_kref);
+ kref_put(&device->device_kref, dxgdevice_release);
+ } else {
+ args.adapter = adapter->host_handle;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_reserve_gpu_va(process, adapter, &args);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(&input->virtual_address, &args.virtual_address,
+ sizeof(args.virtual_address));
+ if (ret) {
+ pr_err("%s failed to copy VA to user", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (adapter) {
+ dxgadapter_release_lock_shared(adapter);
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ }
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_free_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_freegpuvirtualaddress args;
+ struct dxgadapter *adapter = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ args.adapter = adapter->host_handle;
+ ret = dxgvmb_send_free_gpu_va(process, adapter, &args);
+
+cleanup:
+
+ if (adapter) {
+ dxgadapter_release_lock_shared(adapter);
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ }
+
+ return ret;
+}
+
+static int
+dxgk_update_gpu_va(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_updategpuvirtualaddress args;
+	struct d3dkmt_updategpuvirtualaddress *__user input = inargs;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_update_gpu_va(process, adapter, &args);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(&input->fence_value, &args.fence_value,
+ sizeof(args.fence_value));
+ if (ret) {
+ pr_err("%s failed to copy fence value to user", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ return ret;
+}
+
+static int
+dxgk_create_sync_object(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_createsynchronizationobject2 args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct eventfd_ctx *event = NULL;
+ struct dxgsyncobject *syncobj = NULL;
+ bool device_lock_acquired = false;
+ struct dxgsharedsyncobject *syncobjgbl = NULL;
+ struct dxghosteventcpu *host_event = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ syncobj = dxgsyncobject_create(process, device, adapter, args.info.type,
+ args.info.flags);
+ if (syncobj == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
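+	/*
+	 * For CPU notification objects, wrap the caller's eventfd in a host
+	 * event so the host can signal it by event id.
+	 */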
+ if (args.info.type == _D3DDDI_CPU_NOTIFICATION) {
+ event = eventfd_ctx_fdget((int)
+ args.info.cpu_notification.event);
+ if (IS_ERR(event)) {
+ pr_err("failed to reference the event");
+ event = NULL;
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ host_event = syncobj->host_event;
+ host_event->hdr.event_id = dxgglobal_new_host_event_id();
+ host_event->cpu_event = event;
+ host_event->remove_from_list = false;
+ host_event->destroy_after_signal = false;
+ host_event->hdr.event_type = dxghostevent_cpu_event;
+ dxgglobal_add_host_event(&host_event->hdr);
+ args.info.cpu_notification.event = host_event->hdr.event_id;
+ dev_dbg(dxgglobaldev, "creating CPU notification event: %lld",
+ args.info.cpu_notification.event);
+ }
+
+ ret = dxgvmb_send_create_sync_object(process, adapter, &args, syncobj);
+ if (ret < 0)
+ goto cleanup;
+
+ if (args.info.flags.shared) {
+ syncobjgbl = dxgsharedsyncobj_create(device->adapter, syncobj);
+ if (syncobjgbl == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ dxgsharedsyncobj_add_syncobj(syncobjgbl, syncobj);
+
+ syncobjgbl->host_shared_handle = args.info.shared_handle;
+ }
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy output args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ args.sync_object);
+ if (ret >= 0)
+ syncobj->handle = args.sync_object;
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+cleanup:
+
+ if (ret < 0) {
+ if (syncobj) {
+ dxgsyncobject_destroy(process, syncobj);
+ if (args.sync_object.v)
+ dxgvmb_send_destroy_sync_object(process,
+ args.sync_object);
+ event = NULL;
+ }
+ if (event)
+ eventfd_ctx_put(event);
+ }
+ if (syncobjgbl)
+ kref_put(&syncobjgbl->ssyncobj_kref, dxgsharedsyncobj_release);
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_destroy_sync_object(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_destroysynchronizationobject args;
+ struct dxgsyncobject *syncobj = NULL;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dev_dbg(dxgglobaldev, "handle 0x%x", args.sync_object.v);
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ syncobj = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ args.sync_object, true);
+ if (syncobj) {
+ dev_dbg(dxgglobaldev, "syncobj 0x%p", syncobj);
+ syncobj->handle.v = 0;
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ args.sync_object);
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (syncobj == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dxgsyncobject_destroy(process, syncobj);
+
+ ret = dxgvmb_send_destroy_sync_object(process, args.sync_object);
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_open_sync_object_nt(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_opensyncobjectfromnthandle2 args;
+ struct dxgsyncobject *syncobj = NULL;
+ struct dxgsharedsyncobject *syncobj_fd = NULL;
+ struct file *file = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dddi_synchronizationobject_flags flags = { };
+ int ret;
+ bool device_lock_acquired = false;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ args.sync_object.v = 0;
+
+ if (args.device.v) {
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+			ret = -EINVAL;
+			goto cleanup;
+ }
+ } else {
+ pr_err("device handle is missing");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ file = fget(args.nt_handle);
+ if (!file) {
+ pr_err("failed to get file from handle: %llx",
+ args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (file->f_op != &dxg_syncobj_fops) {
+ pr_err("invalid fd: %llx", args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ syncobj_fd = file->private_data;
+ if (syncobj_fd == NULL) {
+ pr_err("invalid private data: %llx", args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ flags.shared = 1;
+ flags.nt_security_sharing = 1;
+ syncobj = dxgsyncobject_create(process, device, adapter,
+ syncobj_fd->type, flags);
+ if (syncobj == NULL) {
+ pr_err("failed to create sync object");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dxgsharedsyncobj_add_syncobj(syncobj_fd, syncobj);
+
+ ret = dxgvmb_send_open_sync_object_nt(process, &dxgglobal->channel,
+ &args, syncobj);
+ if (ret < 0) {
+ pr_err("failed to open sync object on host: %x",
+ syncobj_fd->host_shared_handle.v);
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ args.sync_object);
+ if (ret >= 0) {
+ syncobj->handle = args.sync_object;
+ kref_get(&syncobj->syncobj_kref);
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret == 0)
+ goto success;
+ ret = -EINVAL;
+ pr_err("%s failed to copy output args", __func__);
+
+cleanup:
+
+ if (syncobj) {
+ dxgsyncobject_destroy(process, syncobj);
+ syncobj = NULL;
+ }
+
+ if (args.sync_object.v)
+ dxgvmb_send_destroy_sync_object(process, args.sync_object);
+
+success:
+
+ if (file)
+ fput(file);
+ if (syncobj)
+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release);
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_open_sync_object(struct dxgprocess *process, void *__user inargs)
+{
+ pr_err("%s is not supported", __func__);
+ return -ENOTTY;
+}
+
+static int
+dxgk_signal_sync_object(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_signalsynchronizationobject2 args;
+ struct d3dkmt_signalsynchronizationobject2 *__user in_args = inargs;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int ret;
+ u32 fence_count = 1;
+ struct eventfd_ctx *event = NULL;
+ struct dxghosteventcpu *host_event = NULL;
+ bool host_event_added = false;
+ u64 host_event_id = 0;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.context_count >= D3DDDI_MAX_BROADCAST_CONTEXT ||
+ args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
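+	/*
+	 * For enqueue_cpu_event, register the eventfd as a host event.
+	 * It is destroyed after the host signals it.
+	 */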
+ if (args.flags.enqueue_cpu_event) {
+ host_event = vzalloc(sizeof(*host_event));
+ if (host_event == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ host_event->process = process;
+ event = eventfd_ctx_fdget((int)args.cpu_event_handle);
+ if (IS_ERR(event)) {
+ pr_err("failed to reference the event");
+ event = NULL;
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ fence_count = 0;
+ host_event->cpu_event = event;
+ host_event_id = dxgglobal_new_host_event_id();
+ host_event->hdr.event_type = dxghostevent_cpu_event;
+ host_event->hdr.event_id = host_event_id;
+ host_event->remove_from_list = true;
+ host_event->destroy_after_signal = true;
+ dxgglobal_add_host_event(&host_event->hdr);
+ host_event_added = true;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_signal_sync_object(process, adapter,
+ args.flags, args.fence.fence_value,
+ args.context, args.object_count,
+ in_args->object_array,
+ args.context_count,
+ in_args->contexts, fence_count,
+ NULL, (void *)host_event_id,
+ zerohandle);
+
+ /*
+	 * When the send operation succeeds, the host event is destroyed
+	 * after it is signaled by the host.
+ */
+
+cleanup:
+
+ if (ret < 0) {
+ if (host_event_added) {
+ /* The event might be signaled and destroyed by host */
+ host_event = (struct dxghosteventcpu *)
+ dxgglobal_get_host_event(host_event_id);
+ if (host_event) {
+ eventfd_ctx_put(event);
+ event = NULL;
+ vfree(host_event);
+ host_event = NULL;
+ }
+ }
+ if (event)
+ eventfd_ctx_put(event);
+ if (host_event)
+ vfree(host_event);
+ }
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_signalsynchronizationobjectfromcpu args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (args.object_count == 0 ||
+ args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) {
+		dev_dbg(dxgglobaldev, "Invalid object count: %d",
+ args.object_count);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_signal_sync_object(process, adapter,
+ args.flags, 0, zerohandle,
+ args.object_count, args.objects, 0,
+ NULL, args.object_count,
+ args.fence_values, NULL,
+ args.device);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_signal_sync_object_gpu(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_signalsynchronizationobjectfromgpu args;
+ struct d3dkmt_signalsynchronizationobjectfromgpu *__user user_args =
+ inargs;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dddicb_signalflags flags = { };
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count == 0 ||
+ args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_signal_sync_object(process, adapter,
+ flags, 0, zerohandle,
+ args.object_count,
+ args.objects, 1,
+ &user_args->context,
+ args.object_count,
+ args.monitored_fence_values, NULL,
+ zerohandle);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_signalsynchronizationobjectfromgpu2 args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmthandle context_handle;
+ struct eventfd_ctx *event = NULL;
+ u64 *fences = NULL;
+ u32 fence_count = 0;
+ int ret;
+ struct dxghosteventcpu *host_event = NULL;
+ bool host_event_added = false;
+ u64 host_event_id = 0;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.flags.enqueue_cpu_event) {
+ if (args.object_count != 0 || args.cpu_event_handle == 0) {
+ pr_err("Bad input for EnqueueCpuEvent: %d %lld",
+ args.object_count, args.cpu_event_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ } else if (args.object_count == 0 ||
+ args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE ||
+ args.context_count == 0 ||
+ args.context_count > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ pr_err("Invalid input: %d %d",
+ args.object_count, args.context_count);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = copy_from_user(&context_handle, args.contexts,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy context handle", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.flags.enqueue_cpu_event) {
+ host_event = vzalloc(sizeof(*host_event));
+ if (host_event == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ host_event->process = process;
+ event = eventfd_ctx_fdget((int)args.cpu_event_handle);
+ if (IS_ERR(event)) {
+ pr_err("failed to reference the event");
+ event = NULL;
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ fence_count = 0;
+ host_event->cpu_event = event;
+ host_event_id = dxgglobal_new_host_event_id();
+ host_event->hdr.event_id = host_event_id;
+ host_event->hdr.event_type = dxghostevent_cpu_event;
+ host_event->remove_from_list = true;
+ host_event->destroy_after_signal = true;
+ dxgglobal_add_host_event(&host_event->hdr);
+ host_event_added = true;
+ } else {
+ fences = args.monitored_fence_values;
+ fence_count = args.object_count;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ context_handle);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_signal_sync_object(process, adapter,
+ args.flags, 0, zerohandle,
+ args.object_count, args.objects,
+ args.context_count, args.contexts,
+ fence_count, fences,
+ (void *)host_event_id, zerohandle);
+
+cleanup:
+
+ if (ret < 0) {
+ if (host_event_added) {
+ /* The event might be signaled and destroyed by host */
+ host_event = (struct dxghosteventcpu *)
+ dxgglobal_get_host_event(host_event_id);
+ if (host_event) {
+ eventfd_ctx_put(event);
+ event = NULL;
+ vfree(host_event);
+ host_event = NULL;
+ }
+ }
+ if (event)
+ eventfd_ctx_put(event);
+ if (host_event)
+ vfree(host_event);
+ }
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_wait_sync_object(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_waitforsynchronizationobject2 args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON ||
+ args.object_count == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ dev_dbg(dxgglobaldev, "Fence value: %lld", args.fence.fence_value);
+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter,
+ args.context, args.object_count,
+ args.object_array,
+ &args.fence.fence_value, true);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_waitforsynchronizationobjectfromcpu args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct eventfd_ctx *event = NULL;
+ struct dxghosteventcpu host_event = { };
+ struct dxghosteventcpu *async_host_event = NULL;
+ struct completion local_event = { };
+ u64 event_id = 0;
+ int ret;
+ bool host_event_added = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE ||
+ args.object_count == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
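+	/*
+	 * For an async wait the caller's eventfd is signaled when the host
+	 * event fires; otherwise block on a local completion below.
+	 */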
+ if (args.async_event) {
+ async_host_event = vzalloc(sizeof(*async_host_event));
+ if (async_host_event == NULL) {
+			ret = -ENOMEM;
+ goto cleanup;
+ }
+ async_host_event->process = process;
+ event = eventfd_ctx_fdget((int)args.async_event);
+ if (IS_ERR(event)) {
+ pr_err("failed to reference the event");
+ event = NULL;
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ async_host_event->cpu_event = event;
+ async_host_event->hdr.event_id = dxgglobal_new_host_event_id();
+ async_host_event->destroy_after_signal = true;
+ async_host_event->hdr.event_type = dxghostevent_cpu_event;
+ dxgglobal_add_host_event(&async_host_event->hdr);
+ event_id = async_host_event->hdr.event_id;
+ host_event_added = true;
+ } else {
+ init_completion(&local_event);
+ host_event.completion_event = &local_event;
+ host_event.hdr.event_id = dxgglobal_new_host_event_id();
+ host_event.hdr.event_type = dxghostevent_cpu_event;
+ dxgglobal_add_host_event(&host_event.hdr);
+ event_id = host_event.hdr.event_id;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_wait_sync_object_cpu(process, adapter,
+ &args, true, event_id);
+ if (ret < 0)
+ goto cleanup;
+
+ if (args.async_event == 0) {
+ dxgadapter_release_lock_shared(adapter);
+ adapter = NULL;
+ ret = wait_for_completion_interruptible(&local_event);
+ if (ret) {
+			pr_err("%s: wait_for_completion_interruptible failed: %d",
+ __func__, ret);
+ ret = -ERESTARTSYS;
+ }
+
+ }
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ if (host_event.hdr.event_id)
+ dxgglobal_remove_host_event(&host_event.hdr);
+ if (ret < 0) {
+ if (host_event_added) {
+ async_host_event = (struct dxghosteventcpu *)
+ dxgglobal_get_host_event(event_id);
+ if (async_host_event) {
+ if (async_host_event->hdr.event_type ==
+ dxghostevent_cpu_event) {
+ eventfd_ctx_put(event);
+ event = NULL;
+ vfree(async_host_event);
+ async_host_event = NULL;
+ } else {
+ pr_err("Invalid event type");
+ DXGKRNL_ASSERT(0);
+ }
+ }
+ }
+ if (event)
+ eventfd_ctx_put(event);
+ if (async_host_event)
+ vfree(async_host_event);
+ }
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_waitforsynchronizationobjectfromgpu args;
+ struct dxgcontext *context = NULL;
+ struct d3dkmthandle device_handle = {};
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct dxgsyncobject *syncobj = NULL;
+ struct d3dkmthandle *objects = NULL;
+ u32 object_size;
+ u64 *fences = NULL;
+ int ret;
+ enum hmgrentry_type syncobj_type = HMGRENTRY_TYPE_FREE;
+ bool monitored_fence = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE ||
+ args.object_count == 0) {
+ pr_err("Invalid object count: %d", args.object_count);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ object_size = sizeof(struct d3dkmthandle) * args.object_count;
+ objects = vzalloc(object_size);
+ if (objects == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(objects, args.objects, object_size);
+ if (ret) {
+ pr_err("%s failed to copy objects", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
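+	/*
+	 * The type of the first object handle determines whether the wait
+	 * uses monitored fence values.
+	 */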
+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+ context = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context, true);
+ if (context) {
+ device_handle = context->device_handle;
+ syncobj_type =
+ hmgrtable_get_object_type(&process->handle_table,
+ objects[0]);
+ }
+ if (device_handle.v == 0) {
+ pr_err("Invalid context handle: %x", args.context.v);
+ ret = -EINVAL;
+ } else {
+ if (syncobj_type == HMGRENTRY_TYPE_MONITOREDFENCE) {
+ monitored_fence = true;
+ } else if (syncobj_type == HMGRENTRY_TYPE_DXGSYNCOBJECT) {
+ syncobj =
+ hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ objects[0], true);
+ if (syncobj == NULL) {
+ pr_err("Invalid syncobj: %x", objects[0].v);
+ ret = -EINVAL;
+ } else {
+ monitored_fence = syncobj->monitored_fence;
+ }
+ } else {
+ pr_err("Invalid syncobj type: %x", objects[0].v);
+ ret = -EINVAL;
+ }
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+ if (ret < 0)
+ goto cleanup;
+
+ if (monitored_fence) {
+ object_size = sizeof(u64) * args.object_count;
+ fences = vzalloc(object_size);
+ if (fences == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(fences, args.monitored_fence_values,
+ object_size);
+ if (ret) {
+ pr_err("%s failed to copy fences", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ } else {
+ fences = &args.fence_value;
+ }
+
+ device = dxgprocess_device_by_handle(process, device_handle);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter,
+ args.context, args.object_count,
+ objects, fences,
+ !monitored_fence);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ if (objects)
+ vfree(objects);
+ if (fences && fences != &args.fence_value)
+ vfree(fences);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_lock2(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_lock2 args;
+ struct d3dkmt_lock2 *__user result = inargs;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxgallocation *alloc = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
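+	/*
+	 * If the allocation already has a CPU mapping, return the cached
+	 * address without calling the host.
+	 */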
+ args.data = NULL;
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ alloc = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ args.allocation, true);
+ if (alloc == NULL) {
+ ret = -EINVAL;
+ } else {
+ if (alloc->cpu_address) {
+ ret = copy_to_user(&result->data,
+ &alloc->cpu_address,
+ sizeof(args.data));
+ if (ret == 0) {
+ args.data = alloc->cpu_address;
+ if (alloc->cpu_address_mapped)
+ alloc->cpu_address_refcount++;
+ } else {
+ pr_err("%s Failed to copy cpu address",
+ __func__);
+ ret = -EINVAL;
+ }
+ }
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ if (ret < 0)
+ goto cleanup;
+ if (args.data)
+ goto success;
+
+ /*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter, because the device holds a reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_lock2(process, adapter, &args, result);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+success:
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_unlock2(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_unlock2 args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxgallocation *alloc = NULL;
+ bool done = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
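+	/*
+	 * Drop the CPU mapping refcount and unmap the I/O space only when
+	 * it reaches zero.
+	 */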
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ alloc = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ args.allocation, true);
+ if (alloc == NULL) {
+ ret = -EINVAL;
+ } else {
+ if (alloc->cpu_address == NULL) {
+ pr_err("Allocation is not locked: %p", alloc);
+ ret = -EINVAL;
+ } else if (alloc->cpu_address_mapped) {
+ if (alloc->cpu_address_refcount > 0) {
+ alloc->cpu_address_refcount--;
+ if (alloc->cpu_address_refcount != 0) {
+ done = true;
+ } else {
+ dxg_unmap_iospace(alloc->cpu_address,
+ alloc->num_pages << PAGE_SHIFT);
+ alloc->cpu_address_mapped = false;
+ alloc->cpu_address = NULL;
+ }
+ } else {
+ pr_err("Invalid cpu access refcount");
+ done = true;
+ }
+ }
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ if (done)
+ goto success;
+ if (ret < 0)
+ goto cleanup;
+
+ /*
+	 * The call acquires a reference on the device. It is safe to access
+	 * the adapter, because the device holds a reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_unlock2(process, adapter, &args);
+
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+success:
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_update_alloc_property(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dddi_updateallocproperty args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGPAGINGQUEUE,
+ args.paging_queue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_update_alloc_property(process, adapter,
+ &args, inargs);
+
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_mark_device_as_error(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_markdeviceaserror args;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ int ret;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ device->execution_state = _D3DKMT_DEVICEEXECUTION_RESET;
+ ret = dxgvmb_send_mark_device_as_error(process, adapter, &args);
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_query_alloc_residency(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_queryallocationresidency args;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if ((args.allocation_count == 0) == (args.resource.v == 0)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ ret = dxgvmb_send_query_alloc_residency(process, adapter, &args);
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_set_allocation_priority(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_setallocationpriority args;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ ret = dxgvmb_send_set_allocation_priority(process, adapter, &args);
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_get_allocation_priority(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_getallocationpriority args;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ ret = dxgvmb_send_get_allocation_priority(process, adapter, &args);
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+set_context_scheduling_priority(struct dxgprocess *process,
+ struct d3dkmthandle hcontext,
+ int priority, bool in_process)
+{
+ int ret = 0;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ hcontext);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ ret = dxgvmb_send_set_context_sch_priority(process, adapter,
+ hcontext, priority,
+ in_process);
+ if (ret < 0)
+ pr_err("send_set_context_scheduling_priority failed");
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ return ret;
+}
+
+static int
+dxgk_set_context_scheduling_priority(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_setcontextschedulingpriority args;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = set_context_scheduling_priority(process, args.context,
+ args.priority, false);
+cleanup:
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+get_context_scheduling_priority(struct dxgprocess *process,
+ struct d3dkmthandle hcontext,
+ int __user *priority,
+ bool in_process)
+{
+ int ret;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int pri = 0;
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ hcontext);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ ret = dxgvmb_send_get_context_sch_priority(process, adapter,
+ hcontext, &pri, in_process);
+ if (ret < 0)
+ goto cleanup;
+ ret = copy_to_user(priority, &pri, sizeof(pri));
+ if (ret) {
+ pr_err("%s failed to copy priority to user", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ return ret;
+}
+
+static int
+dxgk_get_context_scheduling_priority(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_getcontextschedulingpriority args;
+ struct d3dkmt_getcontextschedulingpriority __user *input = inargs;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = get_context_scheduling_priority(process, args.context,
+ &input->priority, false);
+cleanup:
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_set_context_process_scheduling_priority(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_setcontextinprocessschedulingpriority args;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = set_context_scheduling_priority(process, args.context,
+ args.priority, true);
+cleanup:
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_get_context_process_scheduling_priority(struct dxgprocess *process,
+ void __user *inargs)
+{
+ struct d3dkmt_getcontextinprocessschedulingpriority args;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = get_context_scheduling_priority(process, args.context,
+ &((struct d3dkmt_getcontextinprocessschedulingpriority *)
+ inargs)->priority, true);
+cleanup:
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_changevideomemoryreservation args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ bool adapter_locked = false;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.process != 0) {
+ pr_err("setting memory reservation for other process");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ adapter_locked = true;
+ args.adapter.v = 0;
+ ret = dxgvmb_send_change_vidmem_reservation(process, adapter,
+ zerohandle, &args);
+
+cleanup:
+
+ if (adapter_locked)
+ dxgadapter_release_lock_shared(adapter);
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_query_clock_calibration(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_queryclockcalibration args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ bool adapter_locked = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ adapter_locked = true;
+
+ args.adapter = adapter->host_handle;
+ ret = dxgvmb_send_query_clock_calibration(process, adapter,
+ &args, inargs);
+ if (ret < 0)
+ goto cleanup;
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy output args", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (adapter_locked)
+ dxgadapter_release_lock_shared(adapter);
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ return ret;
+}
+
+static int
+dxgk_flush_heap_transitions(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_flushheaptransitions args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ bool adapter_locked = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ adapter_locked = true;
+
+ args.adapter = adapter->host_handle;
+ ret = dxgvmb_send_flush_heap_transitions(process, adapter, &args);
+ if (ret < 0)
+ goto cleanup;
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy output args", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (adapter_locked)
+ dxgadapter_release_lock_shared(adapter);
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ return ret;
+}
+
+static int
+dxgk_escape(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_escape args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ bool adapter_locked = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ adapter_locked = true;
+
+ args.adapter = adapter->host_handle;
+ ret = dxgvmb_send_escape(process, adapter, &args);
+
+cleanup:
+
+ if (adapter_locked)
+ dxgadapter_release_lock_shared(adapter);
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_query_vidmem_info(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_queryvideomemoryinfo args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ bool adapter_locked = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.process != 0) {
+		pr_err("query vidmem info from another process");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = dxgprocess_adapter_by_handle(process, args.adapter, true);
+ if (adapter == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+ adapter_locked = true;
+
+ args.adapter = adapter->host_handle;
+ ret = dxgvmb_send_query_vidmem_info(process, adapter, &args, inargs);
+
+cleanup:
+
+ if (adapter_locked)
+ dxgadapter_release_lock_shared(adapter);
+ if (adapter)
+ kref_put(&adapter->adapter_kref, dxgadapter_release);
+ if (ret < 0)
+ pr_err("%s failed: %x", __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_get_device_state(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_getdevicestate args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int global_device_state_counter = 0;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ if (args.state_type == _D3DKMT_DEVICESTATE_EXECUTION) {
+ global_device_state_counter =
+ atomic_read(&dxgglobal->device_state_counter);
+ // TODO: re-enable caching
+ if (false) {
+ args.execution_state = device->execution_state;
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy args to user",
+ __func__);
+ ret = -EINVAL;
+ }
+ goto cleanup;
+ }
+ }
+
+ ret = dxgvmb_send_get_device_state(process, adapter, &args, inargs);
+
+ if (ret == 0 && args.state_type == _D3DKMT_DEVICESTATE_EXECUTION) {
+ device->execution_state = args.execution_state;
+ device->execution_state_counter = global_device_state_counter;
+ }
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+ if (ret < 0)
+ pr_err("%s failed %x", __func__, ret);
+
+ return ret;
+}
+
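+/*
+ * Lazily create the host-side NT shared handle for a shared sync object.
+ * The first caller issues the host request and takes an extra reference on
+ * the shared object; subsequent callers only bump the handle reference
+ * count. The same pattern is used for shared resources below.
+ */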
+static int
+dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj,
+ struct dxgprocess *process,
+ struct d3dkmthandle objecthandle)
+{
+ int ret = 0;
+
+ mutex_lock(&syncobj->fd_mutex);
+ if (syncobj->host_shared_handle_nt_reference == 0) {
+ ret = dxgvmb_send_create_nt_shared_object(process,
+ objecthandle,
+ &syncobj->host_shared_handle_nt);
+ if (ret < 0)
+ goto cleanup;
+		dev_dbg(dxgglobaldev, "Host_shared_handle_nt: %x",
+ syncobj->host_shared_handle_nt.v);
+ kref_get(&syncobj->ssyncobj_kref);
+ }
+ syncobj->host_shared_handle_nt_reference++;
+cleanup:
+ mutex_unlock(&syncobj->fd_mutex);
+ return ret;
+}
+
+static int
+dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource,
+ struct dxgprocess *process,
+ struct d3dkmthandle objecthandle)
+{
+ int ret = 0;
+
+ mutex_lock(&resource->fd_mutex);
+ if (resource->host_shared_handle_nt_reference == 0) {
+ ret = dxgvmb_send_create_nt_shared_object(process,
+ objecthandle,
+ &resource->host_shared_handle_nt);
+ if (ret < 0)
+ goto cleanup;
+		dev_dbg(dxgglobaldev, "Resource host_shared_handle_nt: %x",
+ resource->host_shared_handle_nt.v);
+ kref_get(&resource->sresource_kref);
+ }
+ resource->host_shared_handle_nt_reference++;
+cleanup:
+ mutex_unlock(&resource->fd_mutex);
+ return ret;
+}
+
+enum dxg_sharedobject_type {
+ DXG_SHARED_SYNCOBJECT,
+ DXG_SHARED_RESOURCE
+};
+
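+/*
+ * Wrap a shared sync object or shared resource in an anonymous-inode file
+ * and install it as a file descriptor. The fd is what user space passes
+ * around in place of an NT shared handle on the guest side.
+ */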
+static int get_object_fd(enum dxg_sharedobject_type type,
+ void *object, int *fdout)
+{
+ struct file *file;
+ int fd;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ pr_err("get_unused_fd_flags failed: %x", fd);
+ return -ENOTRECOVERABLE;
+ }
+
+ switch (type) {
+ case DXG_SHARED_SYNCOBJECT:
+ file = anon_inode_getfile("dxgsyncobj",
+ &dxg_syncobj_fops, object, 0);
+ break;
+ case DXG_SHARED_RESOURCE:
+ file = anon_inode_getfile("dxgresource",
+ &dxg_resource_fops, object, 0);
+ break;
+	default:
+		put_unused_fd(fd);
+		return -EINVAL;
+	}
+ if (IS_ERR(file)) {
+		pr_err("anon_inode_getfile failed: %ld", PTR_ERR(file));
+ put_unused_fd(fd);
+ return -ENOTRECOVERABLE;
+ }
+
+ fd_install(fd, file);
+ *fdout = fd;
+ return 0;
+}
+
+static int
+dxgk_share_objects(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_shareobjects args;
+ enum hmgrentry_type object_type;
+ struct dxgsyncobject *syncobj = NULL;
+ struct dxgresource *resource = NULL;
+ struct dxgsharedsyncobject *shared_syncobj = NULL;
+ struct dxgsharedresource *shared_resource = NULL;
+ struct d3dkmthandle *handles = NULL;
+ int object_fd = 0;
+ void *obj = NULL;
+ u32 handle_size;
+ int ret;
+ u64 tmp = 0;
+
+ dev_dbg(dxgglobaldev, "ioctl: %s", __func__);
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count == 0 || args.object_count > 1) {
+ pr_err("invalid object count %d", args.object_count);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ handle_size = args.object_count * sizeof(struct d3dkmthandle);
+
+ handles = vzalloc(handle_size);
+ if (handles == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(handles, args.objects, handle_size);
+ if (ret) {
+ pr_err("%s failed to copy object handles", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dev_dbg(dxgglobaldev, "Sharing handle: %x", handles[0].v);
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+ object_type = hmgrtable_get_object_type(&process->handle_table,
+ handles[0]);
+ obj = hmgrtable_get_object(&process->handle_table, handles[0]);
+ if (obj == NULL) {
+ pr_err("invalid object handle %x", handles[0].v);
+ ret = -EINVAL;
+ } else {
+ switch (object_type) {
+ case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+ syncobj = obj;
+ if (syncobj->shared) {
+ kref_get(&syncobj->syncobj_kref);
+ shared_syncobj = syncobj->shared_owner;
+ } else {
+ pr_err("sync object is not shared");
+ syncobj = NULL;
+ ret = -EINVAL;
+ }
+ break;
+ case HMGRENTRY_TYPE_DXGRESOURCE:
+ resource = obj;
+ if (resource->shared_owner) {
+ kref_get(&resource->resource_kref);
+ shared_resource = resource->shared_owner;
+ } else {
+ resource = NULL;
+ pr_err("resource object is not shared");
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ pr_err("invalid object type %d", object_type);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+ if (ret < 0)
+ goto cleanup;
+
+ switch (object_type) {
+ case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+ ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj,
+ &object_fd);
+ if (ret < 0) {
+ pr_err("%s get_object_fd failed for sync object",
+ __func__);
+ goto cleanup;
+ }
+ ret = dxgsharedsyncobj_get_host_nt_handle(shared_syncobj,
+ process,
+ handles[0]);
+ if (ret < 0) {
+ pr_err("%s get_host_nt_handle failed", __func__);
+ goto cleanup;
+ }
+ break;
+ case HMGRENTRY_TYPE_DXGRESOURCE:
+ ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource,
+ &object_fd);
+ if (ret < 0) {
+ pr_err("%s get_object_fd failed for resource",
+ __func__);
+ goto cleanup;
+ }
+ ret = dxgsharedresource_get_host_nt_handle(shared_resource,
+ process, handles[0]);
+ if (ret < 0) {
+			pr_err("%s get_host_nt_handle failed for resource", __func__);
+ goto cleanup;
+ }
+ ret = dxgsharedresource_seal(shared_resource);
+ if (ret < 0) {
+ pr_err("%s dxgsharedresource_seal failed", __func__);
+ goto cleanup;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0)
+ goto cleanup;
+
+ dev_dbg(dxgglobaldev, "Object FD: %x", object_fd);
+
+ tmp = (u64) object_fd;
+
+ ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64));
+ if (ret) {
+ pr_err("%s failed to copy shared handle", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ if (ret < 0) {
+ if (object_fd > 0)
+ put_unused_fd(object_fd);
+ }
+
+ if (handles)
+ vfree(handles);
+
+ if (syncobj)
+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release);
+
+ if (resource)
+ kref_put(&resource->resource_kref, dxgresource_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_invalidate_cache(struct dxgprocess *process, void *__user inargs)
+{
+ pr_err("%s is not implemented", __func__);
+ return -ENOTTY;
+}
+
+static int
+dxgk_query_resource_info(struct dxgprocess *process, void *__user inargs)
+{
+ pr_err("%s is not supported", __func__);
+ return -ENOTTY;
+}
+
+static int
+dxgk_query_resource_info_nt(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_queryresourceinfofromnthandle args;
+ int ret;
+ struct dxgdevice *device = NULL;
+ struct dxgsharedresource *shared_resource = NULL;
+ struct file *file = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ file = fget(args.nt_handle);
+ if (!file) {
+ pr_err("failed to get file from handle: %llx",
+ args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (file->f_op != &dxg_resource_fops) {
+ pr_err("invalid fd: %llx", args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ shared_resource = file->private_data;
+ if (shared_resource == NULL) {
+ pr_err("invalid private data: %llx", args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0) {
+ kref_put(&device->device_kref, dxgdevice_release);
+ device = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgsharedresource_seal(shared_resource);
+ if (ret < 0)
+ goto cleanup;
+
+ args.private_runtime_data_size =
+ shared_resource->runtime_private_data_size;
+ args.resource_priv_drv_data_size =
+ shared_resource->resource_private_data_size;
+ args.allocation_count = shared_resource->allocation_count;
+ args.total_priv_drv_data_size =
+ shared_resource->alloc_private_data_size;
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy output args", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (file)
+ fput(file);
+ if (device)
+ dxgdevice_release_lock_shared(device);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
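+/*
+ * Publish the resource and allocation handles returned by the host into the
+ * process handle table and copy the per-allocation open information back to
+ * the caller-provided array.
+ */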
+int
+assign_resource_handles(struct dxgprocess *process,
+ struct dxgsharedresource *shared_resource,
+ struct d3dkmt_openresourcefromnthandle *args,
+ struct d3dkmthandle resource_handle,
+ struct dxgresource *resource,
+ struct dxgallocation **allocs,
+ struct d3dkmthandle *handles)
+{
+ int ret;
+ int i;
+ u8 *cur_priv_data;
+ u32 total_priv_data_size = 0;
+ struct d3dddi_openallocationinfo2 open_alloc_info = { };
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, resource,
+ HMGRENTRY_TYPE_DXGRESOURCE,
+ resource_handle);
+ if (ret < 0)
+ goto cleanup;
+ resource->handle = resource_handle;
+ resource->handle_valid = 1;
+ cur_priv_data = args->total_priv_drv_data;
+ for (i = 0; i < args->allocation_count; i++) {
+ ret = hmgrtable_assign_handle(&process->handle_table, allocs[i],
+ HMGRENTRY_TYPE_DXGALLOCATION,
+ handles[i]);
+ if (ret < 0)
+ goto cleanup;
+ allocs[i]->alloc_handle = handles[i];
+ allocs[i]->handle_valid = 1;
+ open_alloc_info.allocation = handles[i];
+ if (shared_resource->alloc_private_data_sizes)
+ open_alloc_info.priv_drv_data_size =
+ shared_resource->alloc_private_data_sizes[i];
+ else
+ open_alloc_info.priv_drv_data_size = 0;
+
+ total_priv_data_size += open_alloc_info.priv_drv_data_size;
+ open_alloc_info.priv_drv_data = cur_priv_data;
+ cur_priv_data += open_alloc_info.priv_drv_data_size;
+
+ ret = copy_to_user(&args->open_alloc_info[i],
+ &open_alloc_info,
+ sizeof(open_alloc_info));
+ if (ret) {
+ pr_err("%s failed to copy alloc info", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ args->total_priv_drv_data_size = total_priv_data_size;
+cleanup:
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+ if (ret < 0) {
+ for (i = 0; i < args->allocation_count; i++)
+ dxgallocation_free_handle(allocs[i]);
+ dxgresource_free_handle(resource);
+ }
+ dev_dbg(dxgglobaldev, "%s end %x", __func__, ret);
+ return ret;
+}
+
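+/*
+ * Open a shared resource identified by a guest file descriptor: resolve the
+ * dxgsharedresource from the fd, validate the caller-supplied buffer sizes
+ * against the sealed resource metadata, ask the host to open the resource,
+ * then copy the cached private driver data and the new handles back to user
+ * space.
+ */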
+int
+open_resource(struct dxgprocess *process,
+ struct d3dkmt_openresourcefromnthandle *args,
+ __user struct d3dkmthandle *res_out,
+ __user u32 *total_driver_data_size_out)
+{
+ int ret = 0;
+ int i;
+ struct d3dkmthandle *alloc_handles = NULL;
+ int alloc_handles_size = sizeof(struct d3dkmthandle) *
+ args->allocation_count;
+ struct dxgsharedresource *shared_resource = NULL;
+ struct dxgresource *resource = NULL;
+ struct dxgallocation **allocs = NULL;
+ struct d3dkmthandle global_share = {};
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmthandle resource_handle = {};
+ struct file *file = NULL;
+
+ dev_dbg(dxgglobaldev, "Opening resource handle: %llx", args->nt_handle);
+
+ file = fget(args->nt_handle);
+ if (!file) {
+ pr_err("failed to get file from handle: %llx",
+ args->nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (file->f_op != &dxg_resource_fops) {
+ pr_err("invalid fd type: %llx", args->nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ shared_resource = file->private_data;
+ if (shared_resource == NULL) {
+ pr_err("invalid private data: %llx",
+ args->nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (kref_get_unless_zero(&shared_resource->sresource_kref) == 0)
+ shared_resource = NULL;
+ else
+ global_share = shared_resource->host_shared_handle_nt;
+
+ if (shared_resource == NULL) {
+ pr_err("Invalid shared resource handle: %x",
+ (u32)args->nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ dev_dbg(dxgglobaldev, "Shared resource: %p %x", shared_resource,
+ global_share.v);
+
+ device = dxgprocess_device_by_handle(process, args->device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0) {
+ kref_put(&device->device_kref, dxgdevice_release);
+ device = NULL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgsharedresource_seal(shared_resource);
+ if (ret < 0)
+ goto cleanup;
+
+ if (args->allocation_count != shared_resource->allocation_count ||
+ args->private_runtime_data_size <
+ shared_resource->runtime_private_data_size ||
+ args->resource_priv_drv_data_size <
+ shared_resource->resource_private_data_size ||
+ args->total_priv_drv_data_size <
+ shared_resource->alloc_private_data_size) {
+ ret = -EINVAL;
+ pr_err("Invalid data sizes");
+ goto cleanup;
+ }
+
+ alloc_handles = vzalloc(alloc_handles_size);
+ if (alloc_handles == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ allocs = vzalloc(sizeof(void *) * args->allocation_count);
+ if (allocs == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ resource = dxgresource_create(device);
+ if (resource == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ dxgsharedresource_add_resource(shared_resource, resource);
+
+ for (i = 0; i < args->allocation_count; i++) {
+		allocs[i] = dxgallocation_create(process);
+		if (allocs[i] == NULL) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+ ret = dxgresource_add_alloc(resource, allocs[i]);
+ if (ret < 0)
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_open_resource(process, adapter,
+ device->handle, global_share,
+ args->allocation_count,
+ args->total_priv_drv_data_size,
+ &resource_handle, alloc_handles);
+ if (ret < 0) {
+ pr_err("dxgvmb_send_open_resource failed");
+ goto cleanup;
+ }
+
+ if (shared_resource->runtime_private_data_size) {
+ ret = copy_to_user(args->private_runtime_data,
+ shared_resource->runtime_private_data,
+ shared_resource->runtime_private_data_size);
+ if (ret) {
+ pr_err("%s failed to copy runtime data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ if (shared_resource->resource_private_data_size) {
+ ret = copy_to_user(args->resource_priv_drv_data,
+ shared_resource->resource_private_data,
+ shared_resource->resource_private_data_size);
+ if (ret) {
+ pr_err("%s failed to copy resource data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ if (shared_resource->alloc_private_data_size) {
+ ret = copy_to_user(args->total_priv_drv_data,
+ shared_resource->alloc_private_data,
+ shared_resource->alloc_private_data_size);
+ if (ret) {
+ pr_err("%s failed to copy alloc data", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = assign_resource_handles(process, shared_resource, args,
+ resource_handle, resource, allocs,
+ alloc_handles);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(res_out, &resource_handle,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ pr_err("%s failed to copy resource handle to user", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = copy_to_user(total_driver_data_size_out,
+ &args->total_priv_drv_data_size, sizeof(u32));
+ if (ret) {
+ pr_err("%s failed to copy total driver data size", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ if (ret < 0) {
+ if (resource_handle.v) {
+ struct d3dkmt_destroyallocation2 tmp = { };
+
+ tmp.flags.assume_not_in_use = 1;
+ tmp.device = args->device;
+ tmp.resource = resource_handle;
+			/* Best-effort cleanup; keep the original error code. */
+			dxgvmb_send_destroy_allocation(process, device,
+						       &tmp, NULL);
+ }
+ if (resource)
+ dxgresource_destroy(resource);
+ }
+
+ if (file)
+ fput(file);
+ if (allocs)
+ vfree(allocs);
+ if (shared_resource)
+ kref_put(&shared_resource->sresource_kref,
+ dxgsharedresource_destroy);
+ if (alloc_handles)
+ vfree(alloc_handles);
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ dxgdevice_release_lock_shared(device);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ return ret;
+}
+
+static int
+dxgk_open_resource(struct dxgprocess *process, void *__user inargs)
+{
+ pr_err("%s is not supported", __func__);
+ return -ENOTTY;
+}
+
+static int
+dxgk_open_resource_nt(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_openresourcefromnthandle args;
+ struct d3dkmt_openresourcefromnthandle *__user args_user = inargs;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = open_resource(process, &args,
+ &args_user->resource,
+ &args_user->total_priv_drv_data_size);
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_share_object_with_host(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_shareobjectwithhost args;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_share_object_with_host(process, &args);
+ if (ret) {
+		pr_err("dxgvmb_send_share_object_with_host failed");
+ goto cleanup;
+ }
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy data to user", __func__);
+ ret = -EINVAL;
+ }
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+static int
+dxgk_render(struct dxgprocess *process, void *__user inargs)
+{
+ pr_err("%s is not implemented", __func__);
+ return -ENOTTY;
+}
+
+static int
+dxgk_create_context(struct dxgprocess *process, void *__user inargs)
+{
+ pr_err("%s is not implemented", __func__);
+ return -ENOTTY;
+}
+
+static int
+dxgk_get_shared_resource_adapter_luid(struct dxgprocess *process,
+ void *__user inargs)
+{
+ pr_err("shared_resource_adapter_luid is not implemented");
+ return -ENOTTY;
+}
+
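+/*
+ * Translate a guest file descriptor (as returned by dxgk_share_objects())
+ * for a shared resource into the NT shared handle known to the host.
+ */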
+static int
+get_resource_host_nthandle(int guest_fd, unsigned long long *host_nthandle)
+{
+ struct dxgsharedresource *shared_resource = NULL;
+ struct file *file = NULL;
+ int ret = 0;
+
+ file = fget(guest_fd);
+ if (!file) {
+ pr_err("failed to get file from handle: %x",
+ guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ if (file->f_op != &dxg_resource_fops) {
+ pr_err("invalid fd type: %x", guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ shared_resource = file->private_data;
+ if (shared_resource == NULL) {
+ pr_err("invalid private data: %x",
+ guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (kref_get_unless_zero(&shared_resource->sresource_kref) == 0) {
+ pr_err("Invalid shared resource handle: %x",
+ (u32)guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+	*host_nthandle = (u64) shared_resource->host_shared_handle_nt.v;
+	kref_put(&shared_resource->sresource_kref, dxgsharedresource_destroy);
+
+cleanup:
+	if (file)
+		fput(file);
+	return ret;
+}
+
+static int
+get_syncobj_host_nthandle(int guest_fd, unsigned long long *host_nthandle)
+{
+ struct dxgsharedsyncobject *syncobj_fd = NULL;
+ struct file *file = NULL;
+ int ret = 0;
+
+ file = fget(guest_fd);
+ if (!file) {
+ pr_err("failed to get file from handle: %x",
+ guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (file->f_op != &dxg_syncobj_fops) {
+ pr_err("invalid fd: %x", guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ syncobj_fd = file->private_data;
+ if (syncobj_fd == NULL) {
+ pr_err("invalid private data: %x", guest_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ *host_nthandle = (u64) syncobj_fd->host_shared_handle_nt.v;
+
+cleanup:
+	if (file)
+		fput(file);
+	return ret;
+}
+
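+/*
+ * Translate the semaphore and composition-memory file descriptors in the
+ * request into host NT shared handles and forward the present operation to
+ * the host.
+ */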
+static int
+dxgk_presentvirtual(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_presentvirtual args;
+ __u64 acquire_semaphore_nthandle = 0;
+ __u64 release_semaphore_nthandle = 0;
+ __u64 composition_memory_nthandle = 0;
+ int ret;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ pr_err("%s failed to copy input args", __func__);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ // It is ok if -1 is passed for the acquire semaphore.
+ if (args.acquire_semaphore_fd != -1) {
+ ret = get_syncobj_host_nthandle(args.acquire_semaphore_fd, &acquire_semaphore_nthandle);
+ if (ret) {
+			pr_err("%s failed to get host nthandle for fd %x", __func__, args.acquire_semaphore_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = get_syncobj_host_nthandle(args.release_semaphore_fd, &release_semaphore_nthandle);
+ if (ret) {
+		pr_err("%s failed to get host nthandle for fd %x", __func__, args.release_semaphore_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = get_resource_host_nthandle(args.composition_memory_fd, &composition_memory_nthandle);
+ if (ret) {
+		pr_err("%s failed to get host nthandle for fd %x", __func__, args.composition_memory_fd);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_present_virtual(process, &args,
+ acquire_semaphore_nthandle, release_semaphore_nthandle, composition_memory_nthandle);
+
+cleanup:
+
+ dev_dbg(dxgglobaldev, "ioctl:%s %s %d", errorstr(ret), __func__, ret);
+ return ret;
+}
+
+/*
+ * IOCTL processing
+ * The driver IOCTLs return
+ * - 0 in case of success
+ * - positive values, which are Windows NTSTATUS (for example, STATUS_PENDING).
+ * Positive values are success codes.
+ * - Linux negative error codes
+ */
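+/*
+ * Illustrative example only (hypothetical user-space caller, not part of
+ * this driver): a caller is expected to treat any non-negative ioctl()
+ * return as success, for example:
+ *
+ *	ret = ioctl(fd, LX_DXESCAPE, &args);
+ *	if (ret == -1)
+ *		err = errno;	// Linux error code
+ *	else
+ *		status = ret;	// 0 or a positive NTSTATUS, e.g. STATUS_PENDING
+ */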
+static int dxgk_ioctl(struct file *f, unsigned int p1, unsigned long p2)
+{
+ int code = _IOC_NR(p1);
+ int status;
+ struct dxgprocess *process;
+
+ if (code < 1 || code > LX_IO_MAX) {
+ pr_err("bad ioctl %x %x %x %x",
+ code, _IOC_TYPE(p1), _IOC_SIZE(p1), _IOC_DIR(p1));
+ return -ENOTTY;
+ }
+ if (ioctls[code].ioctl_callback == NULL) {
+ pr_err("ioctl callback is NULL %x", code);
+ return -ENOTTY;
+ }
+ if (ioctls[code].ioctl != p1) {
+ pr_err("ioctl mismatch. Code: %x User: %x Kernel: %x",
+ code, p1, ioctls[code].ioctl);
+ return -ENOTTY;
+ }
+ process = (struct dxgprocess *)f->private_data;
+ if (process->tgid != current->tgid) {
+ pr_err("Call from a wrong process: %d %d",
+ process->tgid, current->tgid);
+ return -ENOTTY;
+ }
+ status = ioctls[code].ioctl_callback(process, (void *__user)p2);
+ return status;
+}
+
+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2)
+{
+ dev_dbg(dxgglobaldev, " compat ioctl %x", p1);
+ return dxgk_ioctl(f, p1, p2);
+}
+
+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2)
+{
+ dev_dbg(dxgglobaldev, " unlocked ioctl %x Code:%d", p1, _IOC_NR(p1));
+ return dxgk_ioctl(f, p1, p2);
+}
+
+#define SET_IOCTL(callback, v) \
+ ioctls[_IOC_NR(v)].ioctl_callback = callback; \
+ ioctls[_IOC_NR(v)].ioctl = v
+
+void init_ioctls(void)
+{
+ SET_IOCTL(/*0x1 */ dxgk_open_adapter_from_luid,
+ LX_DXOPENADAPTERFROMLUID);
+ SET_IOCTL(/*0x2 */ dxgk_create_device,
+ LX_DXCREATEDEVICE);
+ SET_IOCTL(/*0x3 */ dxgk_create_context,
+ LX_DXCREATECONTEXT);
+ SET_IOCTL(/*0x4 */ dxgk_create_context_virtual,
+ LX_DXCREATECONTEXTVIRTUAL);
+ SET_IOCTL(/*0x5 */ dxgk_destroy_context,
+ LX_DXDESTROYCONTEXT);
+ SET_IOCTL(/*0x6 */ dxgk_create_allocation,
+ LX_DXCREATEALLOCATION);
+ SET_IOCTL(/*0x7 */ dxgk_create_paging_queue,
+ LX_DXCREATEPAGINGQUEUE);
+ SET_IOCTL(/*0x8 */ dxgk_reserve_gpu_va,
+ LX_DXRESERVEGPUVIRTUALADDRESS);
+ SET_IOCTL(/*0x9 */ dxgk_query_adapter_info,
+ LX_DXQUERYADAPTERINFO);
+ SET_IOCTL(/*0xa */ dxgk_query_vidmem_info,
+ LX_DXQUERYVIDEOMEMORYINFO);
+ SET_IOCTL(/*0xb */ dxgk_make_resident,
+ LX_DXMAKERESIDENT);
+ SET_IOCTL(/*0xc */ dxgk_map_gpu_va,
+ LX_DXMAPGPUVIRTUALADDRESS);
+ SET_IOCTL(/*0xd */ dxgk_escape,
+ LX_DXESCAPE);
+ SET_IOCTL(/*0xe */ dxgk_get_device_state,
+ LX_DXGETDEVICESTATE);
+ SET_IOCTL(/*0xf */ dxgk_submit_command,
+ LX_DXSUBMITCOMMAND);
+ SET_IOCTL(/*0x10 */ dxgk_create_sync_object,
+ LX_DXCREATESYNCHRONIZATIONOBJECT);
+ SET_IOCTL(/*0x11 */ dxgk_signal_sync_object,
+ LX_DXSIGNALSYNCHRONIZATIONOBJECT);
+ SET_IOCTL(/*0x12 */ dxgk_wait_sync_object,
+ LX_DXWAITFORSYNCHRONIZATIONOBJECT);
+ SET_IOCTL(/*0x13 */ dxgk_destroy_allocation,
+ LX_DXDESTROYALLOCATION2);
+ SET_IOCTL(/*0x14 */ dxgk_enum_adapters,
+ LX_DXENUMADAPTERS2);
+ SET_IOCTL(/*0x15 */ dxgk_close_adapter,
+ LX_DXCLOSEADAPTER);
+ SET_IOCTL(/*0x16 */ dxgk_change_vidmem_reservation,
+ LX_DXCHANGEVIDEOMEMORYRESERVATION);
+ SET_IOCTL(/*0x17 */ dxgk_create_hwcontext,
+ LX_DXCREATEHWCONTEXT);
+ SET_IOCTL(/*0x18 */ dxgk_create_hwqueue,
+ LX_DXCREATEHWQUEUE);
+ SET_IOCTL(/*0x19 */ dxgk_destroy_device,
+ LX_DXDESTROYDEVICE);
+ SET_IOCTL(/*0x1a */ dxgk_destroy_hwcontext,
+ LX_DXDESTROYHWCONTEXT);
+ SET_IOCTL(/*0x1b */ dxgk_destroy_hwqueue,
+ LX_DXDESTROYHWQUEUE);
+ SET_IOCTL(/*0x1c */ dxgk_destroy_paging_queue,
+ LX_DXDESTROYPAGINGQUEUE);
+ SET_IOCTL(/*0x1d */ dxgk_destroy_sync_object,
+ LX_DXDESTROYSYNCHRONIZATIONOBJECT);
+ SET_IOCTL(/*0x1e */ dxgk_evict,
+ LX_DXEVICT);
+ SET_IOCTL(/*0x1f */ dxgk_flush_heap_transitions,
+ LX_DXFLUSHHEAPTRANSITIONS);
+ SET_IOCTL(/*0x20 */ dxgk_free_gpu_va,
+ LX_DXFREEGPUVIRTUALADDRESS);
+ SET_IOCTL(/*0x21 */ dxgk_get_context_process_scheduling_priority,
+ LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY);
+ SET_IOCTL(/*0x22 */ dxgk_get_context_scheduling_priority,
+ LX_DXGETCONTEXTSCHEDULINGPRIORITY);
+ SET_IOCTL(/*0x23 */ dxgk_get_shared_resource_adapter_luid,
+ LX_DXGETSHAREDRESOURCEADAPTERLUID);
+ SET_IOCTL(/*0x24 */ dxgk_invalidate_cache,
+ LX_DXINVALIDATECACHE);
+ SET_IOCTL(/*0x25 */ dxgk_lock2,
+ LX_DXLOCK2);
+ SET_IOCTL(/*0x26 */ dxgk_mark_device_as_error,
+ LX_DXMARKDEVICEASERROR);
+ SET_IOCTL(/*0x27 */ dxgk_offer_allocations,
+ LX_DXOFFERALLOCATIONS);
+ SET_IOCTL(/*0x28 */ dxgk_open_resource,
+ LX_DXOPENRESOURCE);
+ SET_IOCTL(/*0x29 */ dxgk_open_sync_object,
+ LX_DXOPENSYNCHRONIZATIONOBJECT);
+ SET_IOCTL(/*0x2a */ dxgk_query_alloc_residency,
+ LX_DXQUERYALLOCATIONRESIDENCY);
+ SET_IOCTL(/*0x2b */ dxgk_query_resource_info,
+ LX_DXQUERYRESOURCEINFO);
+ SET_IOCTL(/*0x2c */ dxgk_reclaim_allocations,
+ LX_DXRECLAIMALLOCATIONS2);
+ SET_IOCTL(/*0x2d */ dxgk_render,
+ LX_DXRENDER);
+ SET_IOCTL(/*0x2e */ dxgk_set_allocation_priority,
+ LX_DXSETALLOCATIONPRIORITY);
+ SET_IOCTL(/*0x2f */ dxgk_set_context_process_scheduling_priority,
+ LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY);
+ SET_IOCTL(/*0x30 */ dxgk_set_context_scheduling_priority,
+ LX_DXSETCONTEXTSCHEDULINGPRIORITY);
+ SET_IOCTL(/*0x31 */ dxgk_signal_sync_object_cpu,
+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU);
+ SET_IOCTL(/*0x32 */ dxgk_signal_sync_object_gpu,
+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU);
+ SET_IOCTL(/*0x33 */ dxgk_signal_sync_object_gpu2,
+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2);
+ SET_IOCTL(/*0x34 */ dxgk_submit_command_to_hwqueue,
+ LX_DXSUBMITCOMMANDTOHWQUEUE);
+ SET_IOCTL(/*0x35 */ dxgk_submit_wait_to_hwqueue,
+ LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE);
+ SET_IOCTL(/*0x36 */ dxgk_submit_signal_to_hwqueue,
+ LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE);
+ SET_IOCTL(/*0x37 */ dxgk_unlock2,
+ LX_DXUNLOCK2);
+ SET_IOCTL(/*0x38 */ dxgk_update_alloc_property,
+ LX_DXUPDATEALLOCPROPERTY);
+ SET_IOCTL(/*0x39 */ dxgk_update_gpu_va,
+ LX_DXUPDATEGPUVIRTUALADDRESS);
+ SET_IOCTL(/*0x3a */ dxgk_wait_sync_object_cpu,
+ LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU);
+ SET_IOCTL(/*0x3b */ dxgk_wait_sync_object_gpu,
+ LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU);
+ SET_IOCTL(/*0x3c */ dxgk_get_allocation_priority,
+ LX_DXGETALLOCATIONPRIORITY);
+ SET_IOCTL(/*0x3d */ dxgk_query_clock_calibration,
+ LX_DXQUERYCLOCKCALIBRATION);
+ SET_IOCTL(/*0x3e */ dxgk_enum_adapters3,
+ LX_DXENUMADAPTERS3);
+ SET_IOCTL(/*0x3f */ dxgk_share_objects,
+ LX_DXSHAREOBJECTS);
+ SET_IOCTL(/*0x40 */ dxgk_open_sync_object_nt,
+ LX_DXOPENSYNCOBJECTFROMNTHANDLE2);
+ SET_IOCTL(/*0x41 */ dxgk_query_resource_info_nt,
+ LX_DXQUERYRESOURCEINFOFROMNTHANDLE);
+ SET_IOCTL(/*0x42 */ dxgk_open_resource_nt,
+ LX_DXOPENRESOURCEFROMNTHANDLE);
+ SET_IOCTL(/*0x43 */ dxgk_query_statistics,
+ LX_DXQUERYSTATISTICS);
+ SET_IOCTL(/*0x44 */ dxgk_share_object_with_host,
+ LX_DXSHAREOBJECTWITHHOST);
+ SET_IOCTL(/*0x45 */ dxgk_create_sync_file,
+ LX_DXCREATESYNCFILE);
+ SET_IOCTL(/*0x46 */ dxgk_presentvirtual,
+ LX_DXPRESENTVIRTUAL);
+}
diff --git a/virtio_dxgkrnl/misc.c b/virtio_dxgkrnl/misc.c
new file mode 100644
index 0000000..1b152c2
--- /dev/null
+++ b/virtio_dxgkrnl/misc.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Helper functions
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+#include "dxgkrnl.h"
+#include "misc.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dxgk:err: " fmt
+
+u16 *wcsncpy(u16 *dest, const u16 *src, size_t n)
+{
+	size_t i;
+
+	for (i = 0; i < n; i++) {
+		dest[i] = src[i];
+		if (src[i] == 0) {
+			i++;
+			break;
+		}
+	}
+	/* Force null termination, but never write before the buffer. */
+	if (i > 0)
+		dest[i - 1] = 0;
+	return dest;
+}
+
+char *errorstr(int ret)
+{
+ return ret < 0 ? "err" : "";
+}
+
diff --git a/virtio_dxgkrnl/misc.h b/virtio_dxgkrnl/misc.h
new file mode 100644
index 0000000..3079dd5
--- /dev/null
+++ b/virtio_dxgkrnl/misc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Iouri Tarassov <iourit@linux.microsoft.com>
+ *
+ * Dxgkrnl Graphics Driver
+ * Misc definitions
+ *
+ */
+
+#ifndef _MISC_H_
+#define _MISC_H_
+
+/* Max characters in Windows path */
+#define WIN_MAX_PATH 260
+
+extern const struct d3dkmthandle zerohandle;
+
+/*
+ * Synchronization lock hierarchy.
+ *
+ * The higher the enum value, the higher the lock order.
+ * When a lower-order lock is held, a higher-order lock must not be acquired.
+ *
+ * device_list_mutex
+ * host_event_list_mutex
+ * channel_lock
+ * fd_mutex
+ * plistmutex
+ * table_lock
+ * context_list_lock
+ * alloc_list_lock
+ * resource_mutex
+ * shared_resource_list_lock
+ * core_lock
+ * device_lock
+ * process->process_mutex
+ * process_adapter_mutex
+ * adapter_list_lock
+ * device_mutex
+ */
+
+u16 *wcsncpy(u16 *dest, const u16 *src, size_t n);
+char *errorstr(int ret);
+
+enum dxglockstate {
+ DXGLOCK_SHARED,
+ DXGLOCK_EXCL
+};
+
+/*
+ * Some of the Windows return codes, which need to be translated to Linux
+ * IOCTL return codes. Positive values are success codes and need to be
+ * returned from the driver IOCTLs. libdxcore.so depends on returning
+ * specific return codes.
+ */
+#define STATUS_SUCCESS ((int)(0))
+#define STATUS_OBJECT_NAME_INVALID ((int)(0xC0000033L))
+#define STATUS_DEVICE_REMOVED ((int)(0xC00002B6L))
+#define STATUS_INVALID_HANDLE ((int)(0xC0000008L))
+#define STATUS_ILLEGAL_INSTRUCTION ((int)(0xC000001DL))
+#define STATUS_NOT_IMPLEMENTED ((int)(0xC0000002L))
+#define STATUS_PENDING ((int)(0x00000103L))
+#define STATUS_ACCESS_DENIED ((int)(0xC0000022L))
+#define STATUS_BUFFER_TOO_SMALL ((int)(0xC0000023L))
+#define STATUS_OBJECT_TYPE_MISMATCH ((int)(0xC0000024L))
+#define STATUS_GRAPHICS_ALLOCATION_BUSY ((int)(0xC01E0102L))
+#define STATUS_NOT_SUPPORTED ((int)(0xC00000BBL))
+#define STATUS_TIMEOUT ((int)(0x00000102L))
+#define STATUS_INVALID_PARAMETER ((int)(0xC000000DL))
+#define STATUS_NO_MEMORY ((int)(0xC0000017L))
+#define STATUS_OBJECT_NAME_COLLISION ((int)(0xC0000035L))
+#define STATUS_OBJECT_NAME_NOT_FOUND ((int)(0xC0000034L))
+
+#define NT_SUCCESS(status) (status.v >= 0)
+
+#ifndef DEBUG
+
+#define DXGKRNL_ASSERT(exp)
+
+#else
+
+#define DXGKRNL_ASSERT(exp) \
+do { \
+ if (!(exp)) { \
+ dump_stack(); \
+ BUG_ON(true); \
+ } \
+} while (0)
+
+#endif /* DEBUG */
+
+#endif /* _MISC_H_ */
diff --git a/virtio_dxgkrnl/virtio_dxgkrnl.c b/virtio_dxgkrnl/virtio_dxgkrnl.c
new file mode 100644
index 0000000..93367a0
--- /dev/null
+++ b/virtio_dxgkrnl/virtio_dxgkrnl.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio dxgkrnl implementation.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/atomic.h>
+
+#include "dxgkrnl.h"
+#include "dxgvmbus.h"
+#include "dxgglobal.h"
+#include "virtio_dxgkrnl.h"
+
+enum virtio_dxgkrnl_vq {
+ VIRTIO_DXGKRNL_VQ_SETUP,
+ VIRTIO_DXGKRNL_VQ_COMMAND,
+ VIRTIO_DXGKRNL_VQ_EVENT,
+ VIRTIO_DXGKRNL_VQ_MAX
+};
+
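+/*
+ * Per-request context for a command submitted on the command virtqueue.
+ * The context is handed to the device as the virtqueue token; it is freed
+ * once every holder (the submitter and the command-result worker) has
+ * dropped its reference.
+ */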
+struct virtio_dxgkrnl_command {
+ struct list_head command_list_entry;
+ enum dxgkvmb_commandtype command_type;
+ struct d3dkmthandle process;
+ bool async;
+ struct completion *completion;
+ void *command;
+ u32 cmd_size;
+ void *result;
+ u32 result_size;
+ refcount_t ref_count;
+ int seqno;
+};
+
+struct virtio_dxgkrnl_event_buffer {
+ union {
+ struct dxgkvmb_command_signalguestevent signalguestevent;
+ struct dxgkvmb_command_setguestdata setguestdata;
+ };
+};
+
+#define VIRTIO_DXGKRNL_NUM_EVENT_BUFFERS 64
+
+struct virtio_dxgkrnl {
+ struct virtio_device *vdev;
+ struct virtqueue *setup_vq;
+ struct virtqueue *command_vq;
+ spinlock_t command_qlock;
+ struct virtqueue *event_vq;
+
+ struct virtio_shm_region iospace_region;
+
+ /* list of commands that are being processed on the host */
+ struct list_head command_list_head;
+ spinlock_t command_list_mutex;
+
+ /* work queues */
+ struct work_struct event_work;
+ struct work_struct command_result_work;
+
+ /* event buffers, there's a fixed number */
+ struct virtio_dxgkrnl_event_buffer
+ event_buffers[VIRTIO_DXGKRNL_NUM_EVENT_BUFFERS];
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_DXGKRNL, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
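+/*
+ * Record the command type and owning process on the command context for
+ * debug logging. Commands on the global channel (channel->adapter == NULL)
+ * start with a dxgkvmb_command_vm_to_host header, while adapter (vGPU)
+ * channels use dxgkvmb_command_vgpu_to_host; both follow the
+ * dxgvmb_ext_header.
+ */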
+void set_cmd_type(struct dxgvmbuschannel *channel,
+ struct virtio_dxgkrnl_command *ctx)
+{
+ struct dxgkvmb_command_vgpu_to_host *cmd2;
+ struct dxgkvmb_command_vm_to_host *cmd1;
+ struct dxgvmb_ext_header *hdr;
+ char *adapter_or_global;
+ char *sync_mode;
+
+ hdr = ctx->command;
+
+ if (ctx->async)
+ sync_mode = "async";
+ else
+ sync_mode = "sync";
+
+ if (channel->adapter == NULL) {
+ cmd1 = (struct dxgkvmb_command_vm_to_host *)&hdr[1];
+ ctx->command_type =
+ (enum dxgkvmb_commandtype)cmd1->command_type;
+ ctx->process = cmd1->process;
+ adapter_or_global = "global";
+ } else {
+ cmd2 = (struct dxgkvmb_command_vgpu_to_host *)&hdr[1];
+ ctx->command_type = cmd2->command_type;
+ ctx->process = cmd2->process;
+ adapter_or_global = "adapter";
+ }
+
+	dev_dbg(dxgglobaldev, "send_%s_msg %s: %d %p %d", sync_mode,
+		adapter_or_global, ctx->command_type, ctx->command,
+		ctx->cmd_size);
+}
+
+static struct virtio_dxgkrnl_command *
+virtio_dxgkrnl_command_create(struct virtio_dxgkrnl *vp,
+ struct dxgvmbuschannel *channel, u32 cmd_size,
+ u32 result_size, bool async)
+{
+ struct virtio_dxgkrnl_command *cmd;
+ static atomic_t cmd_count = ATOMIC_INIT(0);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+		dev_err(&vp->vdev->dev, "%s: failed to allocate command context\n",
+ __func__);
+ return NULL;
+ }
+ cmd->seqno = atomic_inc_return(&cmd_count);
+ cmd->async = async;
+ cmd->cmd_size = cmd_size;
+ cmd->result_size = result_size;
+
+ cmd->command = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd->command) {
+		dev_err(&vp->vdev->dev, "%s: failed to allocate command buffer\n",
+ __func__);
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (result_size != 0) {
+ cmd->result = kzalloc(result_size, GFP_KERNEL);
+ if (!cmd->result) {
+			dev_err(&vp->vdev->dev,
+				"%s: failed to allocate result buffer\n",
+				__func__);
+			kfree(cmd->command);
+			kfree(cmd);
+			return NULL;
+		}
+ } else {
+ cmd->result = NULL;
+ }
+
+ INIT_LIST_HEAD(&cmd->command_list_entry);
+
+ /* This reference is dropped in dxgkrnl_command_result_work(). */
+ refcount_set(&cmd->ref_count, 1);
+
+ set_cmd_type(channel, cmd);
+
+ return cmd;
+}
+
+void virtio_dxgkrnl_cmd_ref(struct virtio_dxgkrnl_command *cmd)
+{
+ refcount_inc(&cmd->ref_count);
+}
+
+void virtio_dxgkrnl_cmd_unref(struct virtio_dxgkrnl_command *cmd)
+{
+ if (refcount_dec_and_test(&cmd->ref_count)) {
+ kfree(cmd->command);
+ kfree(cmd->result);
+ kfree(cmd);
+ }
+}
+
+static void dxgkrnl_event_callback(struct virtqueue *vq)
+{
+ struct virtio_dxgkrnl *vp;
+
+ vp = vq->vdev->priv;
+
+ queue_work(system_freezable_wq, &vp->event_work);
+}
+
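+/*
+ * Drain completed buffers from the event virtqueue, dispatch the host-to-VM
+ * message each buffer carries, then zero the buffer and post it back so the
+ * host can reuse it.
+ */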
+static void dxgkrnl_event_work(struct work_struct *work)
+{
+ struct virtio_dxgkrnl_event_buffer *eb;
+ struct dxgkvmb_command_host_to_vm *hdr;
+ struct virtio_dxgkrnl *vp;
+ struct scatterlist sg;
+ unsigned int len;
+ bool should_kick;
+
+ vp = container_of(work, struct virtio_dxgkrnl, event_work);
+
+ should_kick = false;
+
+ while ((eb = virtqueue_get_buf(vp->event_vq, &len)) != NULL) {
+ hdr = (struct dxgkvmb_command_host_to_vm *)eb;
+ switch (hdr->command_type) {
+ case DXGK_VMBCOMMAND_SETGUESTDATA:
+ set_guest_data(
+ hdr,
+ sizeof(struct dxgkvmb_command_setguestdata));
+ break;
+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENT:
+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE:
+ signal_guest_event(
+ hdr,
+ sizeof(struct dxgkvmb_command_signalguestevent));
+ break;
+ case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION:
+ /* This message is not used by the driver currently. */
+ break;
+ default:
+ pr_err("unexpected host message %d", hdr->command_type);
+ }
+
+ /* We clear out the event buffer and re-add for use by the host */
+ memset(eb, 0, sizeof(struct virtio_dxgkrnl_event_buffer));
+ sg_init_one(&sg, eb,
+ sizeof(struct virtio_dxgkrnl_event_buffer));
+ virtqueue_add_inbuf(vp->event_vq, &sg, 1, eb, GFP_KERNEL);
+ should_kick = true;
+	}
+
+ if (should_kick)
+ virtqueue_kick(vp->event_vq);
+}
+
+static void dxgkrnl_command_callback(struct virtqueue *vq)
+{
+ struct virtio_dxgkrnl *vp;
+
+ vp = vq->vdev->priv;
+
+ queue_work(system_freezable_wq, &vp->command_result_work);
+}
+
+static void dxgkrnl_command_result_work(struct work_struct *work)
+{
+ struct virtio_dxgkrnl_command *cmd;
+ struct virtio_dxgkrnl *vp;
+ unsigned int len;
+
+ dev_dbg(dxgglobaldev, "%s begins", __func__);
+ vp = container_of(work, struct virtio_dxgkrnl, command_result_work);
+
+ spin_lock(&vp->command_qlock);
+ while ((cmd = virtqueue_get_buf(vp->command_vq, &len)) != NULL) {
+ dev_dbg(dxgglobaldev, "virtqueue_get_buf for command #%d",
+ cmd->seqno);
+ spin_unlock(&vp->command_qlock);
+ if (!cmd->async && cmd->completion != NULL) {
+ dev_dbg(dxgglobaldev, "mark completion for command #%d",
+ cmd->seqno);
+ complete(cmd->completion);
+ }
+
+ virtio_dxgkrnl_cmd_unref(cmd);
+ spin_lock(&vp->command_qlock);
+	}
+ spin_unlock(&vp->command_qlock);
+
+ dev_dbg(dxgglobaldev, "%s ends", __func__);
+}
+
+int dxgglobal_init_global_channel(void)
+{
+ struct virtio_dxgkrnl *vp;
+ int ret;
+
+ vp = dxgglobal->vdxgkrnl;
+
+ if (!virtio_get_shm_region(vp->vdev, &vp->iospace_region,
+ VIRTIO_DXGKRNL_SHM_ID_IOSPACE)) {
+ dev_err(&vp->vdev->dev,
+ "Could not get virtio shared memory region\n");
+ return -EINVAL;
+ }
+
+ if (!devm_request_mem_region(&vp->vdev->dev, vp->iospace_region.addr,
+ vp->iospace_region.len,
+ dev_name(&vp->vdev->dev))) {
+ dev_err(&vp->vdev->dev, "Could not reserve iospace region\n");
+ return -ENOENT;
+ }
+
+ dev_info(&vp->vdev->dev, "virtio-dxgkrnl iospace: 0x%lx +0x%lx\n",
+ (unsigned long)vp->iospace_region.addr,
+ (unsigned long)vp->iospace_region.len);
+
+ dxgglobal->mmiospace_base = vp->iospace_region.addr;
+ dxgglobal->mmiospace_size = vp->iospace_region.len;
+
+ ret = dxgvmb_send_set_iospace_region(dxgglobal->mmiospace_base,
+ dxgglobal->mmiospace_size, 0);
+ if (ret < 0) {
+ dev_err(&vp->vdev->dev, "send_set_iospace_region failed");
+ return ret;
+ }
+
+ dxgglobal->dxgdevice.minor = MISC_DYNAMIC_MINOR;
+ dxgglobal->dxgdevice.name = "dxg";
+ dxgglobal->dxgdevice.fops = &dxgk_fops;
+ dxgglobal->dxgdevice.mode = 0666;
+ ret = misc_register(&dxgglobal->dxgdevice);
+ if (ret) {
+ dev_err(&vp->vdev->dev, "misc_register failed: %d", ret);
+ return ret;
+ }
+ dxgglobaldev = dxgglobal->dxgdevice.this_device;
+ dxgglobal->dxg_dev_initialized = true;
+
+ return ret;
+}
+
+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch)
+{
+ kmem_cache_destroy(ch->packet_cache);
+ ch->packet_cache = NULL;
+}
+
+void dxgglobal_destroy_global_channel(void)
+{
+ down_write(&dxgglobal->channel_lock);
+
+ dxgglobal->global_channel_initialized = false;
+
+ dxgvmbuschannel_destroy(&dxgglobal->channel);
+
+ up_write(&dxgglobal->channel_lock);
+}
+
+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev)
+{
+ int ret = 0;
+
+ ch->hdev = hdev;
+ spin_lock_init(&ch->packet_list_mutex);
+ INIT_LIST_HEAD(&ch->packet_list_head);
+ atomic64_set(&ch->packet_request_id, 0);
+
+ ch->packet_cache = kmem_cache_create(
+ "DXGK packet cache", sizeof(struct dxgvmbuspacket), 0, 0, NULL);
+ if (ch->packet_cache == NULL) {
+ pr_err("packet_cache alloc failed");
+ return -ENOMEM;
+ }
+
+ return ret;
+}
+
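+/*
+ * Fire-and-forget send path. The command is copied into a context that owns
+ * its own buffer, queued on the command virtqueue and released by
+ * dxgkrnl_command_result_work() once the host has consumed it, so the
+ * caller's buffer may be reused as soon as this function returns.
+ */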
+int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, void *command,
+ u32 cmd_size)
+{
+ struct virtio_dxgkrnl_command *ctx;
+ struct virtio_dxgkrnl *vp;
+ struct scatterlist sg;
+ int err;
+ int cur_command_seqno;
+
+ vp = dxgglobal->vdxgkrnl;
+
+ ctx = virtio_dxgkrnl_command_create(vp, channel, cmd_size, 0, true);
+ if (!ctx) {
+ err = -ENOMEM;
+		dev_err(&vp->vdev->dev, "%s: failed to allocate command\n",
+ __func__);
+ return err;
+ }
+ cur_command_seqno = ctx->seqno;
+
+ memcpy(ctx->command, command, cmd_size);
+
+ sg_init_one(&sg, ctx->command, cmd_size);
+ spin_lock(&vp->command_qlock);
+ dev_dbg(dxgglobaldev, "virtqueue_add_outbuf for command #%d",
+ cur_command_seqno);
+	err = virtqueue_add_outbuf(vp->command_vq, &sg, 1, ctx, GFP_ATOMIC);
+ spin_unlock(&vp->command_qlock);
+
+	if (err) {
+		dev_err(&vp->vdev->dev, "%s: failed to add output: %d\n",
+			__func__, err);
+		/* The command was never queued, so free it here. */
+		virtio_dxgkrnl_cmd_unref(ctx);
+		return err;
+	}
+ spin_lock(&vp->command_qlock);
+ dev_dbg(dxgglobaldev, "virtqueue_kick for command #%d",
+ cur_command_seqno);
+ if (unlikely(!virtqueue_kick(vp->command_vq))) {
+ dev_err(&vp->vdev->dev,
+ "%s: virtqueue_kick failed with command virtqueue\n",
+ __func__);
+ err = -1;
+ }
+ spin_unlock(&vp->command_qlock);
+
+ return err;
+}
+
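+/*
+ * Synchronous send path. The command and a result buffer are queued as an
+ * out/in scatterlist pair and the caller blocks on a completion signalled by
+ * the command-result worker. On success the result is copied back and
+ * result_size is returned (the >0 convention the VM bus callers expect).
+ *
+ * Typical use (sketch only, not an exact call site):
+ *
+ *	ret = dxgvmb_send_sync_msg(channel, &command, sizeof(command),
+ *				   &result, sizeof(result));
+ *	if (ret < 0)
+ *		return ret;
+ */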
+int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, void *command,
+ u32 cmd_size, void *result, u32 result_size)
+{
+ struct scatterlist *sgs[2], command_sg, result_sg;
+ struct completion completion = {};
+ struct virtio_dxgkrnl_command *ctx;
+ struct virtio_dxgkrnl *vp;
+ int err;
+ int cur_command_seqno;
+ bool command_queue_locked = false;
+
+ vp = dxgglobal->vdxgkrnl;
+
+ ctx = virtio_dxgkrnl_command_create(vp, channel, cmd_size, result_size,
+ false);
+ if (!ctx) {
+ err = -ENOMEM;
+		dev_err(&vp->vdev->dev, "%s: failed to allocate command\n",
+ __func__);
+ return err;
+ }
+ memcpy(ctx->command, command, cmd_size);
+
+ /* Take a ref to this command because `completion` is on the stack here and
+ * we need to remove the pointer to completion in case we're interrupted
+ * during wait_for_completion_interruptible.
+ */
+ virtio_dxgkrnl_cmd_ref(ctx);
+ cur_command_seqno = ctx->seqno;
+ init_completion(&completion);
+ ctx->completion = &completion;
+
+ sg_init_one(&command_sg, ctx->command, cmd_size);
+ sgs[0] = &command_sg;
+
+ sg_init_one(&result_sg, ctx->result, result_size);
+ sgs[1] = &result_sg;
+
+ spin_lock(&vp->command_qlock);
+ command_queue_locked = true;
+ dev_dbg(dxgglobaldev, "virtqueue_add_sgs for command #%d",
+ cur_command_seqno);
+ err = virtqueue_add_sgs(vp->command_vq, sgs, 1, 1, ctx, GFP_ATOMIC);
+	if (err) {
+		dev_err(&vp->vdev->dev, "%s: failed to add output: %d\n",
+			__func__, err);
+		/* Never queued: drop the submitter reference as well. */
+		virtio_dxgkrnl_cmd_unref(ctx);
+		goto cleanup;
+	}
+
+ dev_dbg(dxgglobaldev, "virtqueue_kick for command #%d",
+ cur_command_seqno);
+ if (unlikely(!virtqueue_kick(vp->command_vq))) {
+ dev_err(&vp->vdev->dev,
+ "%s: virtqueue_kick failed with command virtqueue\n",
+ __func__);
+ err = -1;
+ goto cleanup;
+ }
+ spin_unlock(&vp->command_qlock);
+ command_queue_locked = false;
+
+ /* Spin for a response, the kick causes an ioport write, trapping
+ * into the hypervisor, so the request should be handled immediately.
+ */
+ dev_dbg(dxgglobaldev, "wait_for_completion_interruptible #%d start",
+ cur_command_seqno);
+ wait_for_completion_interruptible(&completion);
+ // In case we've been interrupted, set completion to NULL here on ctx.
+ ctx->completion = NULL;
+ dev_dbg(dxgglobaldev, "wait_for_completion_interruptible #%d end",
+ cur_command_seqno);
+
+ memcpy(result, ctx->result, result_size);
+
+ /* Calling code expects this to be >0 if result_size >0. This doesn't
+ * reflect if any data was actually written to the result buffer, however.
+ * The uses of this >0 return value should be reexamined, perhaps it's
+ * unnecessary.
+ */
+ err = result_size;
+
+cleanup:
+ virtio_dxgkrnl_cmd_unref(ctx);
+ if (command_queue_locked)
+ spin_unlock(&vp->command_qlock);
+
+ return err;
+}
+
+struct winluid luid_from_int64(int64_t value)
+{
+ struct large_integer i;
+ struct winluid luid;
+
+ i.quadpart = value;
+
+ luid.a = i.u.lowpart;
+ luid.b = i.u.highpart;
+ return luid;
+}
+
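+/*
+ * Read the adapter count from the device config space, enumerate the adapter
+ * LUIDs over the setup virtqueue (polled, since the setup queue has no
+ * callback) and register a dxgadapter for each one.
+ */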
+static int initialize_adapters(struct virtio_dxgkrnl *vp)
+{
+ struct virtio_dxgkrnl_enum_adapters_resp *resp;
+ struct virtio_dxgkrnl_enum_adapters_req *req;
+ struct scatterlist *sgs[2], req_sg, resp_sg;
+ struct dxgvgpuchannel *vgpuch;
+ size_t req_size, resp_size;
+ struct winluid luid;
+ __s64 luid_value;
+ u64 num_adapters;
+ unsigned int tmp;
+ int err = 0;
+ int i;
+
+	dev_dbg(&vp->vdev->dev, "%s: initializing adapters\n", __func__);
+
+ num_adapters = virtio_cread64(
+ vp->vdev, offsetof(struct virtio_dxgkrnl_config, num_adapters));
+
+ req_size = sizeof(struct virtio_dxgkrnl_enum_adapters_req);
+	req = kzalloc(req_size, GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+	req->num_adapters = num_adapters;
+	req->adapter_offset = 0;
+
+ sg_init_one(&req_sg, req, req_size);
+ sgs[0] = &req_sg;
+
+ resp_size = sizeof(struct virtio_dxgkrnl_enum_adapters_resp) +
+ sizeof(__s64) * num_adapters;
+	resp = kzalloc(resp_size, GFP_ATOMIC);
+	if (!resp) {
+		err = -ENOMEM;
+		goto cleanup;
+	}
+
+ sg_init_one(&resp_sg, resp, resp_size);
+ sgs[1] = &resp_sg;
+
+ err = virtqueue_add_sgs(vp->setup_vq, sgs, 1, 1, vp, GFP_ATOMIC);
+ if (err) {
+ dev_err(&vp->vdev->dev, "%s: failed to add output: %d\n",
+ __func__, err);
+ goto cleanup;
+ }
+
+ if (unlikely(!virtqueue_kick(vp->setup_vq))) {
+ dev_err(&vp->vdev->dev,
+ "%s: virtqueue_kick failed with setup virtqueue\n",
+ __func__);
+ err = -1;
+ goto cleanup;
+ }
+
+	dev_dbg(&vp->vdev->dev, "%s: virtqueue_kick succeeded\n", __func__);
+
+ while (!virtqueue_get_buf(vp->setup_vq, &tmp) &&
+ !virtqueue_is_broken(vp->setup_vq))
+ cpu_relax();
+
+ if (resp->status) {
+ dev_err(&vp->vdev->dev,
+ "%s: enum_adapters failed with response status %llu\n",
+ __func__, resp->status);
+ err = resp->status;
+ goto cleanup;
+ }
+
+ for (i = 0; i < num_adapters; i++) {
+		dev_dbg(&vp->vdev->dev, "%s: processing adapter %i\n", __func__,
+			i);
+ luid_value = resp->vgpu_luids[i];
+ luid = luid_from_int64(luid_value);
+		dev_dbg(&vp->vdev->dev, "%s: got luid from value %lli\n",
+			__func__, luid_value);
+
+ vgpuch = vzalloc(sizeof(struct dxgvgpuchannel));
+ if (vgpuch == NULL) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ vgpuch->adapter_luid = luid_from_int64(i);
+ vgpuch->hdev = NULL;
+ list_add_tail(&vgpuch->vgpu_ch_list_entry,
+ &dxgglobal->vgpu_ch_list_head);
+
+ err = dxgglobal_create_adapter(NULL, vgpuch->adapter_luid,
+ luid);
+		if (err) {
+			dev_err(&vp->vdev->dev,
+				"%s: failed to create adapter for luid %x-%x: %d\n",
+				__func__, luid.a, luid.b, err);
+			goto cleanup;
+		}
+		dev_dbg(&vp->vdev->dev, "%s: created adapter\n", __func__);
+	}
+
+	dev_dbg(&vp->vdev->dev, "%s: starting adapters\n", __func__);
+
+ dxgglobal_start_adapters();
+
+cleanup:
+ kfree(req);
+ kfree(resp);
+ return err;
+}
+
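+/*
+ * Pre-post all event buffers to the event virtqueue so the host always has
+ * a buffer available when it needs to signal the guest.
+ */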
+static void fill_event_queue(struct virtio_dxgkrnl *vp)
+{
+ struct scatterlist sg;
+ int i;
+
+ for (i = 0; i < VIRTIO_DXGKRNL_NUM_EVENT_BUFFERS; i++) {
+ sg_init_one(&sg, &vp->event_buffers[i],
+ sizeof(struct virtio_dxgkrnl_event_buffer));
+ virtqueue_add_inbuf(vp->event_vq, &sg, 1, &vp->event_buffers[i],
+ GFP_KERNEL);
+ }
+ virtqueue_kick(vp->event_vq);
+}
+
+static int init_vqs(struct virtio_dxgkrnl *vp)
+{
+ vq_callback_t *callbacks[VIRTIO_DXGKRNL_VQ_MAX];
+ struct virtqueue *vqs[VIRTIO_DXGKRNL_VQ_MAX];
+ const char *names[VIRTIO_DXGKRNL_VQ_MAX];
+ int err;
+
+ callbacks[VIRTIO_DXGKRNL_VQ_SETUP] = NULL;
+ names[VIRTIO_DXGKRNL_VQ_SETUP] = "dxgkrnl_setup";
+
+ callbacks[VIRTIO_DXGKRNL_VQ_COMMAND] = dxgkrnl_command_callback;
+ names[VIRTIO_DXGKRNL_VQ_COMMAND] = "dxgkrnl_command";
+ INIT_WORK(&vp->command_result_work, dxgkrnl_command_result_work);
+
+ callbacks[VIRTIO_DXGKRNL_VQ_EVENT] = dxgkrnl_event_callback;
+ names[VIRTIO_DXGKRNL_VQ_EVENT] = "dxgkrnl_event";
+ INIT_WORK(&vp->event_work, dxgkrnl_event_work);
+
+ err = vp->vdev->config->find_vqs(vp->vdev, VIRTIO_DXGKRNL_VQ_MAX, vqs,
+ callbacks, names, NULL, NULL);
+ if (err)
+ return err;
+
+ vp->setup_vq = vqs[VIRTIO_DXGKRNL_VQ_SETUP];
+ vp->command_vq = vqs[VIRTIO_DXGKRNL_VQ_COMMAND];
+ vp->event_vq = vqs[VIRTIO_DXGKRNL_VQ_EVENT];
+
+ return 0;
+}
+
+static int dxgglobal_create(void)
+{
+ int ret = 0;
+
+ dxgglobal = vzalloc(sizeof(struct dxgglobal));
+ if (!dxgglobal)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dxgglobal->plisthead);
+ mutex_init(&dxgglobal->plistmutex);
+ mutex_init(&dxgglobal->device_mutex);
+ mutex_init(&dxgglobal->process_adapter_mutex);
+
+ INIT_LIST_HEAD(&dxgglobal->thread_info_list_head);
+ mutex_init(&dxgglobal->thread_info_mutex);
+
+ INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head);
+ INIT_LIST_HEAD(&dxgglobal->adapter_list_head);
+ init_rwsem(&dxgglobal->adapter_list_lock);
+
+ init_rwsem(&dxgglobal->channel_lock);
+
+ INIT_LIST_HEAD(&dxgglobal->host_event_list_head);
+ spin_lock_init(&dxgglobal->host_event_list_mutex);
+ atomic64_set(&dxgglobal->host_event_id, 1);
+
+ hmgrtable_init(&dxgglobal->handle_table, NULL);
+
+ // Setting this on prevents using GPADL for existing sysmem allocations.
+ dxgglobal->map_guest_pages_enabled = true;
+
+ dev_dbg(dxgglobaldev, "dxgglobal_init end\n");
+ return ret;
+}
+
+static void dxgglobal_destroy(void)
+{
+ if (dxgglobal) {
+ dxgglobal_stop_adapters();
+
+ dxgglobal_destroy_global_channel();
+ hmgrtable_destroy(&dxgglobal->handle_table);
+
+ vfree(dxgglobal);
+ dxgglobal = NULL;
+ }
+}
+
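+/* Allocate the per-device state and initialize the command list and its locks. */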
+static struct virtio_dxgkrnl *virtio_dxgkrnl_create(void)
+{
+ struct virtio_dxgkrnl *vp;
+
+ vp = kzalloc(sizeof(*vp), GFP_KERNEL);
+ if (vp) {
+ spin_lock_init(&vp->command_qlock);
+ spin_lock_init(&vp->command_list_mutex);
+ INIT_LIST_HEAD(&vp->command_list_head);
+ }
+
+ return vp;
+}
+
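+/*
+ * Probe: set up the global state, discover the virtqueues and enumerate the
+ * host-provided adapters.
+ */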
+static int virtdxgkrnl_probe(struct virtio_device *vdev)
+{
+ struct virtio_dxgkrnl *vp;
+ int err;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s: config access disabled\n", __func__);
+ return -EINVAL;
+ }
+
+ vp = virtio_dxgkrnl_create();
+ if (!vp)
+ return -ENOMEM;
+
+ vp->vdev = vdev;
+ vdev->priv = vp;
+
+ dxgglobaldev = &vp->vdev->dev;
+
+	err = dxgglobal_create();
+	if (err) {
+		pr_err("dxgglobal_create failed\n");
+		goto out_free_vp;
+	}
+
+	/* virtio-dxgkrnl always uses the extension header. */
+	dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION;
+
+	err = init_vqs(vp);
+	if (err)
+		goto out_destroy_global;
+
+ virtio_device_ready(vdev);
+
+ dxgglobal->vdxgkrnl = vp;
+
+ fill_event_queue(vp);
+ init_ioctls();
+
+ if (virtio_has_feature(vp->vdev, VIRTIO_DXGKRNL_F_ASYNC_COMMANDS))
+ dxgglobal->async_msg_enabled = true;
+
+ return initialize_adapters(vp);
+
+out_destroy_global:
+	dxgglobal_destroy();
+out_free_vp:
+	kfree(vp);
+	return err;
+}
+
+static void remove_common(struct virtio_dxgkrnl *vp)
+{
+ /* Now we reset the device so we can clean up the queues. */
+ vp->vdev->config->reset(vp->vdev);
+
+ vp->vdev->config->del_vqs(vp->vdev);
+
+ dxgglobal_destroy();
+}
+
+static void virtdxgkrnl_remove(struct virtio_device *vdev)
+{
+ struct virtio_dxgkrnl *vp = vdev->priv;
+
+ remove_common(vp);
+
+ kfree(vp);
+}
+
+#ifdef CONFIG_PM_SLEEP
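+/* On freeze the device is torn down; restore rebuilds the virtqueues. */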
+static int virtdxgkrnl_freeze(struct virtio_device *vdev)
+{
+ struct virtio_dxgkrnl *vp = vdev->priv;
+
+ remove_common(vp);
+ return 0;
+}
+
+static int virtdxgkrnl_restore(struct virtio_device *vdev)
+{
+ int ret;
+
+ ret = init_vqs(vdev->priv);
+ if (ret)
+ return ret;
+
+ virtio_device_ready(vdev);
+
+ return 0;
+}
+#endif
+
+static int virtdxgkrnl_validate(struct virtio_device *vdev)
+{
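+	/* Nothing to validate; all offered features are accepted as-is. */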
+ return 0;
+}
+
+static unsigned int features[] = { VIRTIO_DXGKRNL_F_ASYNC_COMMANDS };
+
+static struct virtio_driver virtio_dxgkrnl_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .validate = virtdxgkrnl_validate,
+ .probe = virtdxgkrnl_probe,
+ .remove = virtdxgkrnl_remove,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtdxgkrnl_freeze,
+ .restore = virtdxgkrnl_restore,
+#endif
+};
+
+module_virtio_driver(virtio_dxgkrnl_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio dxgkrnl driver");
+MODULE_LICENSE("GPL");
diff --git a/virtio_dxgkrnl/virtio_dxgkrnl.h b/virtio_dxgkrnl/virtio_dxgkrnl.h
new file mode 100644
index 0000000..42e78c6
--- /dev/null
+++ b/virtio_dxgkrnl/virtio_dxgkrnl.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_DXGKRNL_H
+#define _LINUX_VIRTIO_DXGKRNL_H
+
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+#define VIRTIO_ID_DXGKRNL 59 /* virtio dxgkrnl (experimental id) */
+
+/* Status values for a virtio_dxgkrnl request. */
+#define VIRTIO_DXGKRNL_S_OK 0
+#define VIRTIO_DXGKRNL_S_IOERR 1
+#define VIRTIO_DXGKRNL_S_UNSUPP 2
+
+/* The feature bitmap for virtio dxgkrnl */
+/* Async commands */
+#define VIRTIO_DXGKRNL_F_ASYNC_COMMANDS 0
+
+struct virtio_dxgkrnl_config {
+ /* Number of dxgkrnl adapters */
+ __u64 num_adapters;
+};
+
+struct virtio_dxgkrnl_enum_adapters_req {
+	/* Number of adapters to enumerate. */
+	__u64 num_adapters;
+	/* Offset into adapters to start enumeration. */
+	__u64 adapter_offset;
+};
+
+struct virtio_dxgkrnl_enum_adapters_resp {
+	/* Status of this request, one of VIRTIO_DXGKRNL_S_*. */
+	__u8 status;
+	__u8 padding[7];
+	/* Array of vGPU LUIDs returned by the device. */
+	__s64 vgpu_luids[];
+};
+
+/* For the id field in virtio_pci_shm_cap */
+#define VIRTIO_DXGKRNL_SHM_ID_IOSPACE 0
+
+#endif /* _LINUX_VIRTIO_DXGKRNL_H */