author    Mika Kuoppala <mika.kuoppala@intel.com>  2016-12-07 16:05:40 +0200
committer Mika Kuoppala <mika.kuoppala@intel.com>  2017-02-07 15:23:53 +0200
commit    81f999531516455061510a327df3c5288d389a29 (patch)
tree      90979befea75d44b34437a1961c58c4dd301e278
parent    c5854989052af7d2022fb042bd7c69c2e27132ee (diff)
tests/gem_svm: Add new test for svm functionality
Heavily based on code by Jesse Barnes, squashed and brought up to date from:
https://cgit.freedesktop.org/~jbarnes/intel-gpu-tools/log/?h=svm

Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
-rw-r--r--  lib/ioctl_wrappers.h      1
-rw-r--r--  tests/Makefile.sources    1
-rw-r--r--  tests/gem_svm.c         395
3 files changed, 397 insertions, 0 deletions
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index 64628df7..7da28468 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -121,6 +121,7 @@ struct local_i915_gem_context_param {
#define LOCAL_CONTEXT_PARAM_GTT_SIZE 0x3
#define LOCAL_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
#define LOCAL_CONTEXT_PARAM_BANNABLE 0x5
+#define LOCAL_CONTEXT_PARAM_SVM 0x6
uint64_t value;
};
void gem_context_require_bannable(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 6e07d938..a3ff0ee7 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -89,6 +89,7 @@ TESTS_progs_M = \
gem_tiled_partial_pwrite_pread \
gem_userptr_blits \
gem_write_read_ring_switch \
+ gem_svm \
gvt_basic \
kms_addfb_basic \
kms_atomic \
diff --git a/tests/gem_svm.c b/tests/gem_svm.c
new file mode 100644
index 00000000..16d03524
--- /dev/null
+++ b/tests/gem_svm.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ */
+
+#include <signal.h>
+#include <sys/poll.h>
+
+#include "igt.h"
+
+/*
+ * Sanity check for SVM - just malloc batch and target buffers, store some
+ * data from the GPU into the target, and check it for the right result
+ * on the CPU.
+ */
+
+static int devid;
+static drm_intel_bufmgr *bufmgr;
+static drm_intel_bo *target_bo;
+static uint32_t *target_buffer;
+static uint32_t *expected_fault_addr;
+static uint32_t batch_buffer[8];
+static sigjmp_buf bad_fault_env;
+
+/**
+ * drm_i915_exec_svm - shared virtual memory execbuf
+ * @batch_ptr: address of batch buffer (in context's CPU address space)
+ * @ctx_id: context to use for execution, must be svm capable
+ * @engine_id: engine on which to run the batch, see execbuffer2 ring flags
+ * @flags: used to signal whether in/out fences should be used
+ * @in_fence_fd: fence handle the GPU will wait on before executing the batch
+ * @out_fence_fd: returned fence handle of the out fence the caller can wait on
+ *
+ * This simplified execbuf just executes an MI_BATCH_BUFFER_START at
+ * @batch_ptr using @ctx_id as the context. The context determines which
+ * address space @batch_ptr is resolved in.
+ *
+ * Note @batch_ptr must be dword aligned.
+ *
+ * By default, the kernel simply executes the batch at the given address
+ * on the GPU.
+ *
+ * If the %I915_EXEC_SVM_FENCE_OUT flag is passed in the @flags field however,
+ * the kernel will return a sync_file (dma_fence) object in @out_fence_fd for
+ * the caller to use to synchronize execution of the passed batch.
+ *
+ * If the %I915_EXEC_SVM_FENCE_IN flag is passed in the @flags field, the
+ * kernel will wait until the fence (dma_fence) object passed in
+ * @in_fence_fd has completed before submitting the batch to the GPU.
+ *
+ */
+struct local_drm_i915_exec_svm {
+ __u64 batch_ptr;
+ __u32 ctx_id;
+ __u32 engine_id; /* see execbuffer2 ring flags */
+
+#define I915_EXEC_SVM_FENCE_OUT (1<<0)
+#define I915_EXEC_SVM_FENCE_IN (1<<1)
+ __u64 flags;
+
+ __u32 in_fence_fd;
+ __u32 out_fence_fd;
+};
+
+#define LOCAL_DRM_IOCTL_I915_EXEC_SVM DRM_IOWR(DRM_COMMAND_BASE + 0x37, struct local_drm_i915_exec_svm)
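+
+/*
+ * A minimal sketch (not exercised by the tests below) of chaining two SVM
+ * submissions with the fence flags documented above: the second batch makes
+ * the GPU wait on the sync_file returned by the first. The helper name is
+ * illustrative only, and it assumes the kernel honours the
+ * I915_EXEC_SVM_FENCE_IN/OUT semantics described in the comment above.
+ */
+static inline int exec_svm_chained(int fd, uint32_t ctx, uint32_t engine,
+ void *batch_a, void *batch_b)
+{
+ struct local_drm_i915_exec_svm a = {
+ .batch_ptr = (uint64_t)(uintptr_t)batch_a,
+ .ctx_id = ctx,
+ .engine_id = engine,
+ .flags = I915_EXEC_SVM_FENCE_OUT,
+ };
+ struct local_drm_i915_exec_svm b = {
+ .batch_ptr = (uint64_t)(uintptr_t)batch_b,
+ .ctx_id = ctx,
+ .engine_id = engine,
+ .flags = I915_EXEC_SVM_FENCE_IN | I915_EXEC_SVM_FENCE_OUT,
+ };
+
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_I915_EXEC_SVM, &a))
+ return -errno;
+
+ b.in_fence_fd = a.out_fence_fd; /* GPU waits for batch_a to complete */
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_I915_EXEC_SVM, &b)) {
+ close(a.out_fence_fd);
+ return -errno;
+ }
+
+ close(a.out_fence_fd);
+ return b.out_fence_fd; /* caller waits on and closes this fence */
+}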
+
+static uint32_t gem_context_create_svm(int fd)
+{
+ uint32_t ctx;
+ struct local_i915_gem_context_param arg;
+
+ ctx = gem_context_create(fd);
+
+ memset(&arg, 0, sizeof(arg));
+ arg.param = LOCAL_CONTEXT_PARAM_SVM;
+ arg.context = ctx;
+
+ gem_context_get_param(fd, &arg);
+ igt_assert(arg.value == 0);
+ arg.value = 1;
+ gem_context_set_param(fd, &arg);
+ arg.value = 0;
+ gem_context_get_param(fd, &arg);
+ igt_assert(arg.value == 1);
+
+ return ctx;
+}
+
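+/*
+ * Wait for a sync_file fd to signal via poll(); timeout is in ms.
+ * Returns 0 on success, -1 with errno set (ETIME on timeout).
+ */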
+static int sync_wait(int fd, int timeout)
+{
+ struct pollfd fds = {0};
+ int ret;
+
+ fds.fd = fd;
+ fds.events = POLLIN;
+
+ do {
+ ret = poll(&fds, 1, timeout);
+ if (ret > 0) {
+ if (fds.revents & (POLLERR | POLLNVAL)) {
+ errno = EINVAL;
+ return -1;
+ }
+ return 0;
+ } else if (ret == 0) {
+ errno = ETIME;
+ return -1;
+ }
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ return ret;
+}
+
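+/*
+ * Emit an 8-dword batch: MI_STORE_DWORD_IMM of val to the CPU address buf
+ * (a 64-bit virtual address on gen8+), a NOOP carrying a debug marker in
+ * the NOPID register, then MI_BATCH_BUFFER_END.
+ */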
+static void
+emit_store_dword_imm(uint32_t *bb, uint32_t *buf, uint32_t val)
+{
+ uint32_t addr_hi, addr_lo;
+ int cmd;
+
+ cmd = MI_STORE_DWORD_IMM;
+
+ addr_hi = (uint64_t)(uintptr_t)buf >> 32;
+ addr_lo = (uint64_t)(uintptr_t)buf & 0xffffffff;
+
+ *bb++ = cmd;
+ *bb++ = addr_lo;
+ *bb++ = addr_hi;
+ *bb++ = val;
+ *bb++ = MI_NOOP;
+ *bb++ = MI_NOOP;
+ *bb++ = MI_NOOP | MI_NOOP_WRITE_ID | (0x0f00);
+ *bb++ = MI_BATCH_BUFFER_END;
+}
+
+static void __exec_svm(int fd, uint32_t engine, uint32_t *batch, uint32_t ctx_id,
+ uint32_t *fence_out)
+{
+ struct local_drm_i915_exec_svm mm;
+
+ memset(&mm, 0, sizeof(mm));
+
+ mm.batch_ptr = (uint64_t)(uintptr_t)batch;
+ mm.ctx_id = ctx_id;
+ mm.engine_id = engine;
+
+ if (fence_out)
+ mm.flags = I915_EXEC_SVM_FENCE_OUT;
+
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_I915_EXEC_SVM, &mm)) {
+ int err = -errno;
+ igt_skip_on(err == -ENODEV);
+ igt_assert_eq(err, 0);
+ }
+
+ if (fence_out)
+ *fence_out = mm.out_fence_fd;
+
+ errno = 0;
+}
+
+static uint32_t exec_svm_fence(int fd, const struct intel_execution_engine *e,
+ uint32_t *batch, uint32_t ctx_id)
+{
+ uint32_t fence;
+
+ __exec_svm(fd, e->exec_id, batch, ctx_id, &fence);
+
+ return fence;
+}
+
+
+static void exec_svm(int fd, const struct intel_execution_engine *e,
+ uint32_t *batch, uint32_t ctx_id)
+{
+ __exec_svm(fd, e->exec_id, batch, ctx_id, NULL);
+}
+
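+/*
+ * Store count_bytes of data dword-by-dword via the GPU. When div == 0,
+ * wait on an out fence and verify the write after every submission;
+ * otherwise only do so on every div-th submission, leaving the rest
+ * in flight.
+ */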
+static void
+store_test(int fd, const struct intel_execution_engine *e, int count_bytes, int offset, int div)
+{
+ int timeout = 1000; /* in ms */
+ uint32_t val = 0x13370000;
+ int i, ret, count_dwords = count_bytes / 4;
+ uint32_t ctx;
+ unsigned long fence_waits = 0;
+
+ /* Pass memset for fault testing */
+ if (target_buffer)
+ memset(target_buffer, 0, count_bytes);
+
+ ctx = gem_context_create_svm(fd);
+ igt_info("using GPU to write to %p\n", target_buffer);
+
+ for (i = 0; i < count_dwords; i++) {
+ emit_store_dword_imm(batch_buffer, target_buffer + offset + i,
+ val + i);
+
+ if (!div || (i % div) == 0) {
+ int fence = exec_svm_fence(fd, e, batch_buffer, ctx);
+ ret = sync_wait(fence, timeout);
+ fence_waits++;
+
+ igt_assert_f(!ret, "fence wait returned (%d): %s\n",
+ errno, strerror(errno));
+
+ if (target_buffer) {
+ igt_assert_f(*(uint32_t *)(target_buffer + offset + i)
+ == (val + i),
+ "value mismatch: read 0x%08x, expected 0x%08x, offset 0x%08x\n",
+ *(uint32_t *)(target_buffer + offset + i),
+ val + i,
+ offset + i);
+ }
+
+ close(fence);
+ } else {
+ exec_svm(fd, e, batch_buffer, ctx);
+ }
+ }
+
+ igt_info("waited for %lu out fences\n", fence_waits);
+
+ gem_context_destroy(fd, ctx);
+}
+
+static void bad_fault_segv_handler(int signo, siginfo_t *sinfo, void *context)
+{
+ int ret;
+
+ igt_assert_f(sinfo->si_addr == expected_fault_addr,
+ "fault address doesn't match expected fault address\n");
+
+ /* On success we skip the storedw */
+ ret = 1;
+ siglongjmp(bad_fault_env, ret);
+}
+
+static void test_bad_fault(int fd, const struct intel_execution_engine *e, bool gpu)
+{
+ struct sigaction act;
+ struct sigaction old_handler;
+ int ret;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_sigaction = bad_fault_segv_handler;
+ act.sa_flags = SA_SIGINFO;
+
+ target_buffer = NULL;
+ expected_fault_addr = target_buffer;
+ sigaction(SIGSEGV, &act, &old_handler);
+ ret = sigsetjmp(bad_fault_env, 1); /* save sigmask so SIGSEGV is unblocked after siglongjmp */
+
+ /* Try to write the unmapped zero page */
+ if (!ret) {
+ if (gpu) {
+ store_test(fd, e, 4, 0, 0);
+ } else {
+ *target_buffer = 1337;
+ }
+ }
+
+ sigaction(SIGSEGV, &old_handler, NULL);
+
+ if (ret == 1)
+ /* success, faulted at expected addr */
+ igt_success();
+ else
+ igt_fail(IGT_EXIT_FAILURE);
+
+}
+
+igt_main
+{
+ const struct intel_execution_engine *e;
+ int fd;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_INTEL);
+ devid = intel_get_drm_devid(fd);
+
+ igt_require_f(intel_gen(devid) >= 8,
+ "SVM only available on BDW+\n");
+
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ igt_assert_f(bufmgr, "failed to init libdrm\n");
+ }
+
+ igt_subtest_f("bad-fault-cpu")
+ test_bad_fault(fd, e, false);
+
+ for (e = intel_execution_engines; e->name; e++) {
+
+ if (!strcmp(e->name, "default"))
+ continue;
+
+ igt_subtest_f("basic-%s", e->name) {
+ int size = 4096;
+
+ target_buffer = malloc(size);
+ igt_assert_f(target_buffer, "failed to alloc target buffer\n");
+ store_test(fd, e, size, 0, 0);
+ free(target_buffer);
+ }
+
+ igt_subtest_f("basic-bo-cpu-map-%s", e->name) {
+ int size = 4096;
+
+ target_bo = drm_intel_bo_alloc(bufmgr,
+ "target svm buffer",
+ size, size);
+ drm_intel_bo_map(target_bo, true);
+ target_buffer = target_bo->virtual;
+ igt_assert_f(target_buffer, "failed to map target bo\n");
+ store_test(fd, e, size, 0, 0);
+ drm_intel_bo_unmap(target_bo);
+ drm_intel_bo_unreference(target_bo);
+ }
+#if 0
+ igt_subtest_f("bo-gtt-map-%s", e->name) {
+ int size = 4096;
+
+ target_bo = drm_intel_bo_alloc(bufmgr,
+ "target svm buffer",
+ size, size);
+ drm_intel_gem_bo_map_gtt(target_bo);
+ target_buffer = target_bo->virtual;
+ igt_assert_f(target_buffer, "failed to map target bo\n");
+ store_test(fd, e, size, 0, 0);
+ drm_intel_gem_bo_unmap_gtt(target_bo);
+ drm_intel_bo_unreference(target_bo);
+ }
+
+ igt_subtest_f("bad-fault-gpu-%s", e->name)
+ test_bad_fault(fd, e, true);
+#endif
+
+ igt_subtest_f("long-%s", e->name) {
+ int size = 4096*1024;
+
+ target_buffer = malloc(size);
+ igt_assert_f(target_buffer, "failed to alloc target buffer\n");
+ store_test(fd, e, size, 0, 0);
+ free(target_buffer);
+ }
+
+ igt_subtest_f("long-stall-%s", e->name) {
+ int size = 4096*1024;
+
+ target_buffer = malloc(size);
+ igt_assert_f(target_buffer, "failed to alloc target buffer\n");
+ store_test(fd, e, size, 0, 1);
+ store_test(fd, e, size, 0, 2);
+ store_test(fd, e, size, 0, 3);
+ store_test(fd, e, size, 0, 4);
+ store_test(fd, e, size, 0, 5);
+ store_test(fd, e, size, 0, 13);
+ store_test(fd, e, size, 0, 51);
+ store_test(fd, e, size, 0, 97);
+ store_test(fd, e, size, 0, 217);
+ store_test(fd, e, size, 0, 4096);
+ free(target_buffer);
+ }
+ }
+
+ igt_fixture {
+ drm_intel_bufmgr_destroy(bufmgr);
+ }
+}