author     Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>  2022-03-09 17:52:44 +0200
committer  Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>  2022-06-17 21:22:20 +0000
commit     e376bb438cb3c827ef177c8f65ec3a74c7bfdad1 (patch)
tree       0947f34e73f7ebf43ab57e92d7b9da026e319c66
parent     7f3cd757aa75e4c2a5d4a6c5c077cbb3bd49236a (diff)
drm/i915/vm_bind: Introduce VM_BIND ioctl
Add VM_BIND and VM_UNBIND ioctls to bind/unbind a section of an object at the
specified GPU virtual addresses. Also add I915_VM_CREATE_FLAGS_USE_VM_BIND to
select vm_bind mode of binding.

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
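For illustration, a minimal userspace sketch (not part of this patch) of
probing for the feature and creating an address space in vm_bind mode. It
assumes a file descriptor opened on the i915 render node, the libdrm
drmIoctl() wrapper, and a uapi header carrying the definitions added below;
error handling is kept minimal.

#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Returns non-zero if the kernel reports VM_BIND support for this device. */
static int i915_has_vm_bind(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_VM_BIND,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}

/* Creates a VM that accepts only vm_bind bindings; returns 0 on success. */
static int i915_vm_create_bind_mode(int fd, __u32 *vm_id)
{
	struct drm_i915_gem_vm_control ctl = {
		.flags = I915_VM_CREATE_FLAGS_USE_VM_BIND,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return -1;

	*vm_id = ctl.vm_id;
	return 0;
}

On kernels without this patch, I915_PARAM_HAS_VM_BIND is unknown and the
getparam call fails with -EINVAL, so the probe above simply reports no
support.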
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c  |  20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h  |  15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h          |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c           |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c         |   3
-rw-r--r--  include/uapi/drm/i915_drm.h                  | 113
6 files changed, 169 insertions, 18 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab4c5ab28e4d..039e70f3691b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -81,7 +81,6 @@
#include "pxp/intel_pxp.h"
-#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -346,20 +345,6 @@ static int proto_context_register(struct drm_i915_file_private *fpriv,
return ret;
}
-static struct i915_address_space *
-i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
-{
- struct i915_address_space *vm;
-
- xa_lock(&file_priv->vm_xa);
- vm = xa_load(&file_priv->vm_xa, id);
- if (vm)
- kref_get(&vm->ref);
- xa_unlock(&file_priv->vm_xa);
-
- return vm;
-}
-
static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
@@ -1791,7 +1776,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (!HAS_FULL_PPGTT(i915))
return -ENODEV;
- if (args->flags)
+ if (args->flags & I915_VM_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
ppgtt = i915_ppgtt_create(to_gt(i915), 0);
@@ -1811,6 +1796,9 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (err)
goto err_put;
+ if (args->flags & I915_VM_CREATE_FLAGS_USE_VM_BIND)
+ ppgtt->vm.vm_bind_mode = true;
+
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->vm_id = id;
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index e5b0f66ea1fe..723bf446c934 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -12,6 +12,7 @@
#include "gt/intel_context.h"
#include "i915_drv.h"
+#include "i915_file_private.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@@ -139,6 +140,20 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+static inline struct i915_address_space *
+i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct i915_address_space *vm;
+
+ xa_lock(&file_priv->vm_xa);
+ vm = xa_load(&file_priv->vm_xa, id);
+ if (vm)
+ kref_get(&vm->ref);
+ xa_unlock(&file_priv->vm_xa);
+
+ return vm;
+}
+
struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index a40d928b3888..7ab2e3be2bcb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -271,6 +271,12 @@ struct i915_address_space {
/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
bool skip_pte_rewrite:1;
+ /**
+ * true: allow only vm_bind method of binding.
+ * false: allow only legacy execbuf method of binding.
+ */
+ bool vm_bind_mode:1;
+
u8 top;
u8 pd_shift;
u8 scratch_order;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 90b0ce5051af..4c852dfacf2e 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -1750,6 +1750,34 @@ i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
+static int i915_gem_vm_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_vm_bind *args = data;
+ struct i915_address_space *vm;
+
+ vm = i915_gem_vm_lookup(file->driver_priv, args->vm_id);
+ if (unlikely(!vm))
+ return -ENOENT;
+
+ i915_vm_put(vm);
+ return -EINVAL;
+}
+
+static int i915_gem_vm_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_vm_unbind *args = data;
+ struct i915_address_space *vm;
+
+ vm = i915_gem_vm_lookup(file->driver_priv, args->vm_id);
+ if (unlikely(!vm))
+ return -ENOENT;
+
+ i915_vm_put(vm);
+ return -EINVAL;
+}
+
static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1810,6 +1838,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_BIND, i915_gem_vm_bind_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_UNBIND, i915_gem_vm_unbind_ioctl, DRM_RENDER_ALLOW),
};
/*
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index c12a0adefda5..f25cca8d72b4 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -168,6 +168,9 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_PERF_REVISION:
value = i915_perf_ioctl_version();
break;
+ case I915_PARAM_HAS_VM_BIND:
+ value = GRAPHICS_VER(i915) >= 12;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 35ca528803fd..e3a1f8000abb 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -420,6 +420,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
#define DRM_I915_GEM_CREATE_EXT 0x3c
+#define DRM_I915_GEM_VM_BIND 0x3d
+#define DRM_I915_GEM_VM_UNBIND 0x3e
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -484,6 +486,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -699,6 +703,9 @@ typedef struct drm_i915_irq_wait {
/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56
+/* VM_BIND feature availability */
+#define I915_PARAM_HAS_VM_BIND 57
+
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -2347,8 +2354,6 @@ struct drm_i915_gem_context_destroy {
* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @id.
*
- * No flags are defined, with all bits reserved and must be zero.
- *
* An extension chain may be provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
@@ -2360,6 +2365,10 @@ struct drm_i915_gem_context_destroy {
*/
struct drm_i915_gem_vm_control {
__u64 extensions;
+
+#define I915_VM_CREATE_FLAGS_USE_VM_BIND (1u << 0)
+#define I915_VM_CREATE_FLAGS_UNKNOWN \
+ (-(I915_VM_CREATE_FLAGS_USE_VM_BIND << 1))
__u32 flags;
__u32 vm_id;
};
@@ -3454,6 +3463,106 @@ struct drm_i915_gem_create_ext_protected_content {
/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+/**
+ * struct drm_i915_gem_vm_bind_fence - Bind/unbind completion notification.
+ *
+ * A timeline out fence for vm_bind/unbind completion notification.
+ */
+struct drm_i915_gem_vm_bind_fence {
+ /** @handle: User's handle for a drm_syncobj to signal. */
+ __u32 handle;
+
+ /** @rsvd: Reserved, MBZ */
+ __u32 rsvd;
+
+ /**
+ * @value: A point in the timeline.
+ * The value must be 0 for a binary drm_syncobj. A value of 0 for a
+ * timeline drm_syncobj is invalid, as it would turn the drm_syncobj
+ * into a binary one.
+ */
+ __u64 value;
+};
+
+/**
+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
+ *
+ * This structure is passed to the VM_BIND ioctl and specifies the mapping of
+ * a GPU virtual address (VA) range to the section of an object that should be
+ * bound in the device page table of the specified address space (VM).
+ * The specified VA range must be unique (i.e., not currently bound) and can
+ * be mapped to the whole object or to a section of the object (partial binding).
+ * Multiple VA mappings can be created to the same section of the object
+ * (aliasing).
+ *
+ * The @start, @offset and @length should be 4K page aligned. However, DG2
+ * and XEHPSDV have a 64K page size for device local-memory and a compact
+ * page table. On those platforms, for binding device local-memory objects,
+ * @start should be 2M aligned, and @offset and @length should be 64K aligned.
+ * Also, on those platforms, it is not allowed to bind a device local-memory
+ * object and a system memory object in a single 2M section of the VA range.
+ */
+struct drm_i915_gem_vm_bind {
+ /** @vm_id: VM (address space) id to bind */
+ __u32 vm_id;
+
+ /** @handle: Object handle */
+ __u32 handle;
+
+ /** @start: Virtual Address start to bind */
+ __u64 start;
+
+ /** @offset: Offset in object to bind */
+ __u64 offset;
+
+ /** @length: Length of mapping to bind */
+ __u64 length;
+
+ /** @flags: reserved for future usage, currently MBZ */
+ __u64 flags;
+
+ /** @fence: Timeline fence for bind completion signaling */
+ struct drm_i915_gem_vm_bind_fence fence;
+
+ /** @extensions: 0-terminated chain of extensions */
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
+ *
+ * This structure is passed to the VM_UNBIND ioctl and specifies the GPU
+ * virtual address (VA) range that should be unbound from the device page
+ * table of the specified address space (VM). The specified VA range must
+ * match one of the mappings created with the VM_BIND ioctl. The TLB is
+ * flushed upon unbind completion.
+ *
+ * The @start and @length must specify a unique mapping bound with the
+ * VM_BIND ioctl.
+ */
+struct drm_i915_gem_vm_unbind {
+ /** @vm_id: VM (address space) id to unbind from */
+ __u32 vm_id;
+
+ /** @rsvd: Reserved, MBZ */
+ __u32 rsvd;
+
+ /** @start: Virtual Address start to unbind */
+ __u64 start;
+
+ /** @length: Length of mapping to unbind */
+ __u64 length;
+
+ /** @flags: reserved for future usage, currently MBZ */
+ __u64 flags;
+
+ /** @fence: Timeline fence for unbind completion signaling */
+ struct drm_i915_gem_vm_bind_fence fence;
+
+ /** @extensions: 0-terminated chain of extensions */
+ __u64 extensions;
+};
+
#if defined(__cplusplus)
}
#endif
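For reference, a hedged userspace sketch of how a bind/unbind pair against
this uAPI is intended to look, using drm_syncobj timeline points as the out
fences. The GPU VA, object handle and sizes are illustrative (64K here; the
2M/64K alignment rules above apply only to DG2/XEHPSDV local-memory objects),
and since the ioctl handlers in this patch are still stubs returning -EINVAL,
the calls only exercise the interface shape.

#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Bind a 64K section of @bo_handle at a fixed GPU VA, then unbind it. */
static int example_vm_bind_unbind(int fd, __u32 vm_id, __u32 bo_handle)
{
	__u32 syncobj;
	int ret;

	/* drm_syncobj used as the timeline out fence for both operations. */
	if (drmSyncobjCreate(fd, 0, &syncobj))
		return -1;

	struct drm_i915_gem_vm_bind bind = {
		.vm_id = vm_id,
		.handle = bo_handle,
		.start = 0x100000,	/* GPU VA to map at (illustrative) */
		.offset = 0,		/* bind from the start of the object */
		.length = 0x10000,	/* 64K section */
		.fence = {
			.handle = syncobj,
			.value = 1,	/* timeline point, must be non-zero */
		},
	};

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
	if (ret)
		goto out;

	/* ... wait on (syncobj, 1), then submit work using the mapping ... */

	struct drm_i915_gem_vm_unbind unbind = {
		.vm_id = vm_id,
		.start = bind.start,	/* must match the bound mapping */
		.length = bind.length,
		.fence = {
			.handle = syncobj,
			.value = 2,	/* next timeline point */
		},
	};

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
out:
	drmSyncobjDestroy(fd, syncobj);
	return ret;
}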