author    Dave Airlie <airlied@redhat.com>  2021-03-16 16:45:12 +1000
committer Dave Airlie <airlied@redhat.com>  2021-03-16 17:08:46 +1000
commit    51c3b916a4d7e24b4918925965867fdd9bd8dd59
tree      3257e3e0fda7fbb0fe1425177b0c661db1bfee63
parent    1e28eed17697bcf343c6743f0028cc3b5dd88bf0
parent    762949bb1da78941b25e63f7e952af037eee15a9
Merge tag 'drm-misc-next-2021-03-03' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.13:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - %p4cc printk format modifier
  - atomic: introduce drm_crtc_commit_wait, rework atomic plane state
    helpers to take the drm_atomic_state structure
  - dma-buf: heaps rework to return a struct dma_buf
  - simple-kms: Add plane state helpers
  - ttm: debugfs support, removal of sysfs

Driver Changes:
  - Convert drivers to shadow plane helpers
  - arc: Move to drm/tiny
  - ast: cursor plane reworks
  - gma500: Remove TTM and medfield support
  - mxsfb: imx8mm support
  - panfrost: MMU IRQ handling rework
  - qxl: rework to better handle resources deallocation, locking
  - sun4i: Add alpha properties for UI and VI layers
  - vc4: RPi4 CEC support
  - vmwgfx: doc cleanup

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210303100600.dgnkadonzuvfnu22@gilmour
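The core change driving most of the qxl churn below is the reworked atomic
plane hooks: they now receive the full struct drm_atomic_state and look up
their own old/new plane state. A minimal sketch of the new callback shape
(hypothetical driver callback, assuming the 5.13-era helpers from
<drm/drm_atomic.h>):

    #include <drm/drm_atomic.h>
    #include <drm/drm_plane.h>

    static void example_plane_atomic_update(struct drm_plane *plane,
                                            struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);
            struct drm_plane_state *old_state =
                    drm_atomic_get_old_plane_state(state, plane);

            if (!new_state->crtc || !new_state->fb)
                    return;

            /* Program hardware from new_state; old_state remains
             * available for comparisons, as qxl does for the cursor. */
    }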
Diffstat (limited to 'drivers/gpu/drm/qxl')
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c       3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c   368
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c      8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h       6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c      2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c       2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c     2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c       1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c       30
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c    57
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h    7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_prime.c     4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c   76
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c       19
14 files changed, 336 insertions(+), 249 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 54e3c3a97440..7b00c955cd82 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -254,6 +254,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
}
}
+ wake_up_all(&qdev->release_event);
DRM_DEBUG_DRIVER("%d\n", i);
return i;
@@ -268,7 +269,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
- false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
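Note the extra argument: qxl_bo_create() grew a TTM priority parameter (wired
into bo->tbo.priority in qxl_object.c further down). A hedged usage sketch,
with 0 for ordinary BOs and 1 for release and cursor BOs:

    struct qxl_bo *bo;
    int ret;

    /* Sketch: TTM keeps one LRU list per priority and evicts the
     * lower-priority lists first, so priority-1 BOs survive longest. */
    ret = qxl_bo_create(qdev, size, false /* kernel */, false /* pinned */,
                        QXL_GEM_DOMAIN_VRAM, 0 /* priority */, NULL, &bo);
    if (ret)
            return ret;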
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 10738e04c09b..a7637e79cb42 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -464,25 +464,26 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
};
static int qxl_primary_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo;
- if (!state->crtc || !state->fb)
+ if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
- bo = gem_to_qxl_bo(state->fb->obj[0]);
+ bo = gem_to_qxl_bo(new_plane_state->fb->obj[0]);
return qxl_check_framebuffer(qdev, bo);
}
-static int qxl_primary_apply_cursor(struct drm_plane *plane)
+static int qxl_primary_apply_cursor(struct qxl_device *qdev,
+ struct drm_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = to_qxl(dev);
- struct drm_framebuffer *fb = plane->state->fb;
- struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
int ret = 0;
@@ -506,8 +507,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
- cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;
+ cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
+ cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
@@ -524,17 +525,126 @@ out_free_release:
return ret;
}
+static int qxl_primary_move_cursor(struct qxl_device *qdev,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
+ struct qxl_cursor_cmd *cmd;
+ struct qxl_release *release;
+ int ret = 0;
+
+ if (!qcrtc->cursor_bo)
+ return 0;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+ QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_MOVE;
+ cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
+ cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ return ret;
+}
+
+static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev,
+ struct qxl_bo *user_bo,
+ int hot_x, int hot_y)
+{
+ static const u32 size = 64 * 64 * 4;
+ struct qxl_bo *cursor_bo;
+ struct dma_buf_map cursor_map;
+ struct dma_buf_map user_map;
+ struct qxl_cursor cursor;
+ int ret;
+
+ if (!user_bo)
+ return NULL;
+
+ ret = qxl_bo_create(qdev, sizeof(struct qxl_cursor) + size,
+ false, true, QXL_GEM_DOMAIN_VRAM, 1,
+ NULL, &cursor_bo);
+ if (ret)
+ goto err;
+
+ ret = qxl_bo_vmap(cursor_bo, &cursor_map);
+ if (ret)
+ goto err_unref;
+
+ ret = qxl_bo_vmap(user_bo, &user_map);
+ if (ret)
+ goto err_unmap;
+
+ cursor.header.unique = 0;
+ cursor.header.type = SPICE_CURSOR_TYPE_ALPHA;
+ cursor.header.width = 64;
+ cursor.header.height = 64;
+ cursor.header.hot_spot_x = hot_x;
+ cursor.header.hot_spot_y = hot_y;
+ cursor.data_size = size;
+ cursor.chunk.next_chunk = 0;
+ cursor.chunk.prev_chunk = 0;
+ cursor.chunk.data_size = size;
+ if (cursor_map.is_iomem) {
+ memcpy_toio(cursor_map.vaddr_iomem,
+ &cursor, sizeof(cursor));
+ memcpy_toio(cursor_map.vaddr_iomem + sizeof(cursor),
+ user_map.vaddr, size);
+ } else {
+ memcpy(cursor_map.vaddr,
+ &cursor, sizeof(cursor));
+ memcpy(cursor_map.vaddr + sizeof(cursor),
+ user_map.vaddr, size);
+ }
+
+ qxl_bo_vunmap(user_bo);
+ qxl_bo_vunmap(cursor_bo);
+ return cursor_bo;
+
+err_unmap:
+ qxl_bo_vunmap(cursor_bo);
+err_unref:
+ qxl_bo_unpin(cursor_bo);
+ qxl_bo_unref(&cursor_bo);
+err:
+ return NULL;
+}
+
+static void qxl_free_cursor(struct qxl_bo *cursor_bo)
+{
+ if (!cursor_bo)
+ return;
+
+ qxl_bo_unpin(cursor_bo);
+ qxl_bo_unref(&cursor_bo);
+}
+
static void qxl_primary_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
- struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
+ struct qxl_bo *bo = gem_to_qxl_bo(new_state->fb->obj[0]);
struct qxl_bo *primary;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
- .x2 = plane->state->fb->width,
- .y2 = plane->state->fb->height
+ .x2 = new_state->fb->width,
+ .y2 = new_state->fb->height
};
uint32_t dumb_shadow_offset = 0;
@@ -544,25 +654,29 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
if (qdev->primary_bo)
qxl_io_destroy_primary(qdev);
qxl_io_create_primary(qdev, primary);
- qxl_primary_apply_cursor(plane);
+ qxl_primary_apply_cursor(qdev, plane->state);
}
if (bo->is_dumb)
dumb_shadow_offset =
- qdev->dumb_heads[plane->state->crtc->index].x;
+ qdev->dumb_heads[new_state->crtc->index].x;
- qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1,
+ qxl_draw_dirty_fb(qdev, new_state->fb, bo, 0, 0, &norect, 1, 1,
dumb_shadow_offset);
}
static void qxl_primary_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
if (old_state->fb) {
struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
+ if (bo->shadow)
+ bo = bo->shadow;
if (bo->is_primary) {
qxl_io_destroy_primary(qdev);
bo->is_primary = false;
@@ -571,126 +685,29 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
}
static void qxl_cursor_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = to_qxl(dev);
- struct drm_framebuffer *fb = plane->state->fb;
- struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
- struct qxl_release *release;
- struct qxl_cursor_cmd *cmd;
- struct qxl_cursor *cursor;
- struct drm_gem_object *obj;
- struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
- int ret;
- struct dma_buf_map user_map;
- struct dma_buf_map cursor_map;
- void *user_ptr;
- int size = 64*64*4;
-
- ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
- QXL_RELEASE_CURSOR_CMD,
- &release, NULL);
- if (ret)
- return;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct qxl_device *qdev = to_qxl(plane->dev);
+ struct drm_framebuffer *fb = new_state->fb;
if (fb != old_state->fb) {
- obj = fb->obj[0];
- user_bo = gem_to_qxl_bo(obj);
-
- /* pinning is done in the prepare/cleanup framevbuffer */
- ret = qxl_bo_kmap(user_bo, &user_map);
- if (ret)
- goto out_free_release;
- user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */
-
- ret = qxl_alloc_bo_reserved(qdev, release,
- sizeof(struct qxl_cursor) + size,
- &cursor_bo);
- if (ret)
- goto out_kunmap;
-
- ret = qxl_bo_pin(cursor_bo);
- if (ret)
- goto out_free_bo;
-
- ret = qxl_release_reserve_list(release, true);
- if (ret)
- goto out_unpin;
-
- ret = qxl_bo_kmap(cursor_bo, &cursor_map);
- if (ret)
- goto out_backoff;
- if (cursor_map.is_iomem) /* TODO: Use mapping abstraction properly */
- cursor = (struct qxl_cursor __force *)cursor_map.vaddr_iomem;
- else
- cursor = (struct qxl_cursor *)cursor_map.vaddr;
-
- cursor->header.unique = 0;
- cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
- cursor->header.width = 64;
- cursor->header.height = 64;
- cursor->header.hot_spot_x = fb->hot_x;
- cursor->header.hot_spot_y = fb->hot_y;
- cursor->data_size = size;
- cursor->chunk.next_chunk = 0;
- cursor->chunk.prev_chunk = 0;
- cursor->chunk.data_size = size;
- memcpy(cursor->chunk.data, user_ptr, size);
- qxl_bo_kunmap(cursor_bo);
- qxl_bo_kunmap(user_bo);
-
- cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
- cmd->u.set.visible = 1;
- cmd->u.set.shape = qxl_bo_physical_address(qdev,
- cursor_bo, 0);
- cmd->type = QXL_CURSOR_SET;
-
- old_cursor_bo = qcrtc->cursor_bo;
- qcrtc->cursor_bo = cursor_bo;
- cursor_bo = NULL;
+ qxl_primary_apply_cursor(qdev, new_state);
} else {
-
- ret = qxl_release_reserve_list(release, true);
- if (ret)
- goto out_free_release;
-
- cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
- cmd->type = QXL_CURSOR_MOVE;
+ qxl_primary_move_cursor(qdev, new_state);
}
-
- cmd->u.position.x = plane->state->crtc_x + fb->hot_x;
- cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
-
- qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_release_fence_buffer_objects(release);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
-
- if (old_cursor_bo != NULL)
- qxl_bo_unpin(old_cursor_bo);
- qxl_bo_unref(&old_cursor_bo);
- qxl_bo_unref(&cursor_bo);
-
- return;
-
-out_backoff:
- qxl_release_backoff_reserve_list(release);
-out_unpin:
- qxl_bo_unpin(cursor_bo);
-out_free_bo:
- qxl_bo_unref(&cursor_bo);
-out_kunmap:
- qxl_bo_kunmap(user_bo);
-out_free_release:
- qxl_release_free(qdev, release);
- return;
-
}
static void qxl_cursor_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
+ struct qxl_crtc *qcrtc;
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
int ret;
@@ -713,6 +730,10 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+
+ qcrtc = to_qxl_crtc(old_state->crtc);
+ qxl_free_cursor(qcrtc->cursor_bo);
+ qcrtc->cursor_bo = NULL;
}
static void qxl_update_dumb_head(struct qxl_device *qdev,
@@ -770,13 +791,45 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
DRM_DEBUG("%dx%d\n", surf->width, surf->height);
}
+static void qxl_prepare_shadow(struct qxl_device *qdev, struct qxl_bo *user_bo,
+ int crtc_index)
+{
+ struct qxl_surface surf;
+
+ qxl_update_dumb_head(qdev, crtc_index,
+ user_bo);
+ qxl_calc_dumb_shadow(qdev, &surf);
+ if (!qdev->dumb_shadow_bo ||
+ qdev->dumb_shadow_bo->surf.width != surf.width ||
+ qdev->dumb_shadow_bo->surf.height != surf.height) {
+ if (qdev->dumb_shadow_bo) {
+ drm_gem_object_put
+ (&qdev->dumb_shadow_bo->tbo.base);
+ qdev->dumb_shadow_bo = NULL;
+ }
+ qxl_bo_create(qdev, surf.height * surf.stride,
+ true, true, QXL_GEM_DOMAIN_SURFACE, 0,
+ &surf, &qdev->dumb_shadow_bo);
+ }
+ if (user_bo->shadow != qdev->dumb_shadow_bo) {
+ if (user_bo->shadow) {
+ qxl_bo_unpin(user_bo->shadow);
+ drm_gem_object_put
+ (&user_bo->shadow->tbo.base);
+ user_bo->shadow = NULL;
+ }
+ drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
+ user_bo->shadow = qdev->dumb_shadow_bo;
+ qxl_bo_pin(user_bo->shadow);
+ }
+}
+
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
- struct qxl_surface surf;
if (!new_state->fb)
return 0;
@@ -786,30 +839,18 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
user_bo->is_dumb) {
- qxl_update_dumb_head(qdev, new_state->crtc->index,
- user_bo);
- qxl_calc_dumb_shadow(qdev, &surf);
- if (!qdev->dumb_shadow_bo ||
- qdev->dumb_shadow_bo->surf.width != surf.width ||
- qdev->dumb_shadow_bo->surf.height != surf.height) {
- if (qdev->dumb_shadow_bo) {
- drm_gem_object_put
- (&qdev->dumb_shadow_bo->tbo.base);
- qdev->dumb_shadow_bo = NULL;
- }
- qxl_bo_create(qdev, surf.height * surf.stride,
- true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
- &qdev->dumb_shadow_bo);
- }
- if (user_bo->shadow != qdev->dumb_shadow_bo) {
- if (user_bo->shadow) {
- drm_gem_object_put
- (&user_bo->shadow->tbo.base);
- user_bo->shadow = NULL;
- }
- drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
- user_bo->shadow = qdev->dumb_shadow_bo;
- }
+ qxl_prepare_shadow(qdev, user_bo, new_state->crtc->index);
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ plane->state->fb != new_state->fb) {
+ struct qxl_crtc *qcrtc = to_qxl_crtc(new_state->crtc);
+ struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
+
+ qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
+ new_state->fb->hot_x,
+ new_state->fb->hot_y);
+ qxl_free_cursor(old_cursor_bo);
}
return qxl_bo_pin(user_bo);
@@ -834,6 +875,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
qxl_bo_unpin(user_bo);
if (old_state->fb != plane->state->fb && user_bo->shadow) {
+ qxl_bo_unpin(user_bo->shadow);
drm_gem_object_put(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
@@ -1155,12 +1197,10 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
}
qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
- ret = qxl_bo_pin(qdev->monitors_config_bo);
+ ret = qxl_bo_vmap(qdev->monitors_config_bo, &map);
if (ret)
return ret;
- qxl_bo_kmap(qdev->monitors_config_bo, &map);
-
qdev->monitors_config = qdev->monitors_config_bo->kptr;
qdev->ram_header->monitors_config =
qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
@@ -1179,11 +1219,13 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
{
int ret;
+ if (!qdev->monitors_config_bo)
+ return 0;
+
qdev->monitors_config = NULL;
qdev->ram_header->monitors_config = 0;
- qxl_bo_kunmap(qdev->monitors_config_bo);
- ret = qxl_bo_unpin(qdev->monitors_config_bo);
+ ret = qxl_bo_vunmap(qdev->monitors_config_bo);
if (ret)
return ret;
@@ -1196,7 +1238,9 @@ int qxl_modeset_init(struct qxl_device *qdev)
int i;
int ret;
- drm_mode_config_init(&qdev->ddev);
+ ret = drmm_mode_config_init(&qdev->ddev);
+ if (ret)
+ return ret;
ret = qxl_create_monitors_object(qdev);
if (ret)
@@ -1228,6 +1272,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
void qxl_modeset_fini(struct qxl_device *qdev)
{
+ if (qdev->dumb_shadow_bo) {
+ qxl_bo_unpin(qdev->dumb_shadow_bo);
+ drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
+ qdev->dumb_shadow_bo = NULL;
+ }
qxl_destroy_monitors_object(qdev);
- drm_mode_config_cleanup(&qdev->ddev);
}
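Taken together, the display changes move cursor BO creation out of the commit
path: prepare_fb may sleep and fail, while atomic_update only queues cursor
commands against a BO that already exists. A sketch of the resulting flow
(function names as in the hunks above):

    /* Cursor BO lifecycle after this rework:
     *
     *   qxl_plane_prepare_fb()            may sleep/fail:
     *       qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
     *                                            hot_x, hot_y);
     *   qxl_cursor_atomic_update()        commit phase, BO already built:
     *       fb changed -> qxl_primary_apply_cursor()   QXL_CURSOR_SET
     *       fb same    -> qxl_primary_move_cursor()    QXL_CURSOR_MOVE
     *   qxl_cursor_atomic_disable()
     *       qxl_free_cursor(qcrtc->cursor_bo)          unpin + unref
     */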
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 7b7acb910780..7d27891e87fa 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -48,7 +48,7 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_clip_rects *dev_clips;
int ret;
- ret = qxl_bo_kmap(clips_bo, &map);
+ ret = qxl_bo_vmap_locked(clips_bo, &map);
if (ret)
return NULL;
dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -202,7 +202,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
if (ret)
goto out_release_backoff;
- ret = qxl_bo_kmap(bo, &surface_map);
+ ret = qxl_bo_vmap_locked(bo, &surface_map);
if (ret)
goto out_release_backoff;
surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -210,7 +210,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
ret = qxl_image_init(qdev, release, dimage, surface_base,
left - dumb_shadow_offset,
top, width, height, depth, stride);
- qxl_bo_kunmap(bo);
+ qxl_bo_vunmap_locked(bo);
if (ret)
goto out_release_backoff;
@@ -247,7 +247,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
rects[i].top = clips_ptr->y1;
rects[i].bottom = clips_ptr->y2;
}
- qxl_bo_kunmap(clips_bo);
+ qxl_bo_vunmap_locked(clips_bo);
qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 83b54f0dad61..6dd57cfb2e7c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -125,7 +125,7 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
struct qxl_memslot {
@@ -214,6 +214,8 @@ struct qxl_device {
spinlock_t release_lock;
struct idr release_idr;
uint32_t release_seqno;
+ atomic_t release_count;
+ wait_queue_head_t release_event;
spinlock_t release_idr_lock;
struct mutex async_io_mutex;
unsigned int last_sent_io_cmd;
@@ -335,7 +337,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index c04cd5a2553c..48a58ba1db96 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -59,7 +59,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
surf.stride = pitch;
surf.format = format;
r = qxl_gem_object_create_with_handle(qdev, file_priv,
- QXL_GEM_DOMAIN_SURFACE,
+ QXL_GEM_DOMAIN_CPU,
args->size, &surf, &qobj,
&handle);
if (r)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 48e096285b4c..a08da0bd9098 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
- r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 60ab7151b84d..ffff54e5fb31 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -186,7 +186,7 @@ qxl_image_init_helper(struct qxl_device *qdev,
}
}
}
- qxl_bo_kunmap(chunk_bo);
+ qxl_bo_vunmap_locked(chunk_bo);
image_bo = dimage->bo;
ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index ddf6588a2a38..d312322cacd1 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -87,6 +87,7 @@ int qxl_irq_init(struct qxl_device *qdev)
init_waitqueue_head(&qdev->display_event);
init_waitqueue_head(&qdev->cursor_event);
init_waitqueue_head(&qdev->io_cmd_event);
+ init_waitqueue_head(&qdev->release_event);
INIT_WORK(&qdev->client_monitors_config_work,
qxl_client_monitors_config_work_func);
atomic_set(&qdev->irq_received, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 4a60a52ab62e..4dc5ad13f12c 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -286,11 +286,35 @@ vram_mapping_free:
void qxl_device_fini(struct qxl_device *qdev)
{
- qxl_bo_unref(&qdev->current_release_bo[0]);
- qxl_bo_unref(&qdev->current_release_bo[1]);
+ int cur_idx;
+
+ /* check if qxl_device_init() was successful (gc_work is initialized last) */
+ if (!qdev->gc_work.func)
+ return;
+
+ for (cur_idx = 0; cur_idx < 3; cur_idx++) {
+ if (!qdev->current_release_bo[cur_idx])
+ continue;
+ qxl_bo_unpin(qdev->current_release_bo[cur_idx]);
+ qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+ qdev->current_release_bo_offset[cur_idx] = 0;
+ qdev->current_release_bo[cur_idx] = NULL;
+ }
+
+ /*
+ * Ask host to release resources (+fill release ring),
+ * then wait for the release actually happening.
+ */
+ qxl_io_notify_oom(qdev);
+ wait_event_timeout(qdev->release_event,
+ atomic_read(&qdev->release_count) == 0,
+ HZ);
+ flush_work(&qdev->gc_work);
+ qxl_surf_evict(qdev);
+ qxl_vram_evict(qdev);
+
qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
- flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
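The ordering in the new qxl_device_fini() is deliberate: outstanding releases
must drain before GEM/TTM state disappears. A condensed sketch of the
sequence, assuming release_count is incremented in qxl_alloc_release_reserved()
and decremented in qxl_release_free() as shown in qxl_release.c below:

    /* Teardown order in qxl_device_fini():
     * 1. unpin and unref the three cached current_release_bo[] slots;
     * 2. qxl_io_notify_oom(), then wait on release_event (bounded by
     *    HZ, i.e. one second) until release_count reaches zero;
     * 3. flush_work(&qdev->gc_work), evict surfaces and VRAM;
     * 4. only then tear down GEM, TTM and the command/cursor/release
     *    rings.
     */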
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ceebc5881f68..6e26d70f2f07 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -29,6 +29,9 @@
#include "qxl_drv.h"
#include "qxl_object.h"
+static int __qxl_bo_pin(struct qxl_bo *bo);
+static void __qxl_bo_unpin(struct qxl_bo *bo);
+
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct qxl_bo *bo;
@@ -103,8 +106,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
.print_info = drm_gem_ttm_print_info,
};
-int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, bool pinned, u32 domain,
+int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
+ bool kernel, bool pinned, u32 domain, u32 priority,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -137,9 +140,10 @@ int qxl_bo_create(struct qxl_device *qdev,
qxl_ttm_placement_from_domain(bo, domain);
+ bo->tbo.priority = priority;
r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, &ctx, size,
- NULL, NULL, &qxl_ttm_bo_destroy);
+ &bo->placement, 0, &ctx, NULL, NULL,
+ &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(qdev->ddev.dev,
@@ -154,10 +158,12 @@ int qxl_bo_create(struct qxl_device *qdev,
return 0;
}
-int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map)
+int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map)
{
int r;
+ dma_resv_assert_held(bo->tbo.base.resv);
+
if (bo->kptr) {
bo->map_count++;
goto out;
@@ -178,6 +184,25 @@ out:
return 0;
}
+int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map)
+{
+ int r;
+
+ r = qxl_bo_reserve(bo);
+ if (r)
+ return r;
+
+ r = __qxl_bo_pin(bo);
+ if (r) {
+ qxl_bo_unreserve(bo);
+ return r;
+ }
+
+ r = qxl_bo_vmap_locked(bo, map);
+ qxl_bo_unreserve(bo);
+ return r;
+}
+
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
@@ -202,7 +227,7 @@ fallback:
return rptr;
}
- ret = qxl_bo_kmap(bo, &bo_map);
+ ret = qxl_bo_vmap_locked(bo, &bo_map);
if (ret)
return NULL;
rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -211,8 +236,10 @@ fallback:
return rptr;
}
-void qxl_bo_kunmap(struct qxl_bo *bo)
+void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
+ dma_resv_assert_held(bo->tbo.base.resv);
+
if (bo->kptr == NULL)
return;
bo->map_count--;
@@ -222,6 +249,20 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
ttm_bo_vunmap(&bo->tbo, &bo->map);
}
+int qxl_bo_vunmap(struct qxl_bo *bo)
+{
+ int r;
+
+ r = qxl_bo_reserve(bo);
+ if (r)
+ return r;
+
+ qxl_bo_vunmap_locked(bo);
+ __qxl_bo_unpin(bo);
+ qxl_bo_unreserve(bo);
+ return 0;
+}
+
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
@@ -232,7 +273,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
io_mapping_unmap_atomic(pmap);
return;
fallback:
- qxl_bo_kunmap(bo);
+ qxl_bo_vunmap_locked(bo);
}
void qxl_bo_unref(struct qxl_bo **bo)
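The kmap/kunmap entry points are renamed to match DRM convention:
qxl_bo_vmap()/qxl_bo_vunmap() reserve and pin internally, while the _locked
variants assert that the caller already holds the reservation. A minimal
caller sketch, assuming a valid struct qxl_bo *bo:

    #include <linux/dma-buf-map.h>

    struct dma_buf_map map;
    int ret;

    ret = qxl_bo_vmap(bo, &map);    /* reserves + pins + maps */
    if (ret)
            return ret;
    /* use map.vaddr, or map.vaddr_iomem when map.is_iomem is set */
    qxl_bo_vunmap(bo);              /* unmaps + unpins */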
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index e60a8f88e226..ee9c29de4d3d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -61,10 +61,13 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
bool kernel, bool pinned, u32 domain,
+ u32 priority,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr);
-extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
-extern void qxl_bo_kunmap(struct qxl_bo *bo);
+int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map);
+int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map);
+int qxl_bo_vunmap(struct qxl_bo *bo);
+void qxl_bo_vunmap_locked(struct qxl_bo *bo);
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 4aa949799446..0628d1cc91fe 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
struct qxl_bo *bo = gem_to_qxl_bo(obj);
int ret;
- ret = qxl_bo_kmap(bo, map);
+ ret = qxl_bo_vmap(bo, map);
if (ret < 0)
return ret;
@@ -71,7 +71,7 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- qxl_bo_kunmap(bo);
+ qxl_bo_vunmap(bo);
}
int qxl_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b372455e2729..f5845c96d414 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,56 +58,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
- struct qxl_release *release;
- int count = 0, sc = 0;
- bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
- release = container_of(fence, struct qxl_release, base);
- have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
-
-retry:
- sc++;
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
-
- qxl_io_notify_oom(qdev);
-
- for (count = 0; count < 11; count++) {
- if (!qxl_queue_garbage_collect(qdev, true))
- break;
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
- }
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
-
- if (have_drawable_releases || sc < 4) {
- if (sc > 2)
- /* back off */
- usleep_range(500, 1000);
- if (time_after(jiffies, end))
- return 0;
-
- if (have_drawable_releases && sc > 300) {
- DMA_FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
- goto signaled;
- }
- goto retry;
- }
- /*
- * yeah, original sync_obj_wait gave up after 3 spins when
- * have_drawable_releases is not set.
- */
+ if (!wait_event_timeout(qdev->release_event,
+ (dma_fence_is_signaled(fence) ||
+ (qxl_io_notify_oom(qdev), 0)),
+ timeout))
+ return 0;
-signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
@@ -196,14 +156,16 @@ qxl_release_free(struct qxl_device *qdev,
qxl_release_free_list(release);
kfree(release);
}
+ atomic_dec(&qdev->release_count);
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
- struct qxl_bo **bo)
+ struct qxl_bo **bo,
+ u32 priority)
{
/* pin releases bo's they are too messy to evict */
return qxl_bo_create(qdev, PAGE_SIZE, false, true,
- QXL_GEM_DOMAIN_VRAM, NULL, bo);
+ QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
@@ -326,13 +288,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int ret = 0;
union qxl_release_info *info;
int cur_idx;
+ u32 priority;
- if (type == QXL_RELEASE_DRAWABLE)
+ if (type == QXL_RELEASE_DRAWABLE) {
cur_idx = 0;
- else if (type == QXL_RELEASE_SURFACE_CMD)
+ priority = 0;
+ } else if (type == QXL_RELEASE_SURFACE_CMD) {
cur_idx = 1;
- else if (type == QXL_RELEASE_CURSOR_CMD)
+ priority = 1;
+ } else if (type == QXL_RELEASE_CURSOR_CMD) {
cur_idx = 2;
+ priority = 1;
+ }
else {
DRM_ERROR("got illegal type: %d\n", type);
return -EINVAL;
@@ -344,6 +311,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
*rbo = NULL;
return idr_ret;
}
+ atomic_inc(&qdev->release_count);
mutex_lock(&qdev->release_mutex);
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
@@ -352,7 +320,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
qdev->current_release_bo[cur_idx] = NULL;
}
if (!qdev->current_release_bo[cur_idx]) {
- ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+ ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
if (ret) {
mutex_unlock(&qdev->release_mutex);
if (free_bo) {
@@ -437,7 +405,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -458,7 +426,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
@@ -467,7 +435,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
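The rewritten qxl_fence_wait() above collapses some forty lines of retry/spin
logic into a single wait_event_timeout() whose condition exploits the comma
operator: the right-hand side of the || runs qxl_io_notify_oom() as a side
effect on every wakeup and then evaluates to 0, so only the fence state can
satisfy the wait. The same idiom in isolation, with hypothetical
done_predicate()/poke_hardware() names:

    /* The condition is re-evaluated on each wakeup of the waitqueue;
     * (poke_hardware(), 0) performs the side effect, then contributes
     * 'false' to the OR, leaving done_predicate() in charge. */
    long left = wait_event_timeout(wq,
                                   done_predicate() ||
                                   (poke_hardware(), 0),
                                   timeout);
    if (!left)
            return 0;   /* timed out before the predicate turned true */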
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 33c09dc94f8b..b7f77eb685cb 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -36,7 +36,7 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
{
struct qxl_mman *mman;
struct qxl_device *qdev;
@@ -69,7 +69,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -98,8 +98,7 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
/*
* TTM backend functions.
*/
-static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm)
+static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
@@ -170,7 +169,7 @@ static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
qxl_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver qxl_bo_driver = {
+static struct ttm_device_funcs qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
.ttm_tt_destroy = &qxl_ttm_backend_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -193,10 +192,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
- qdev->ddev.anon_inode->i_mapping,
- qdev->ddev.vma_offset_manager,
- false, false);
+ r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
+ qdev->ddev.anon_inode->i_mapping,
+ qdev->ddev.vma_offset_manager,
+ false, false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -227,7 +226,7 @@ void qxl_ttm_fini(struct qxl_device *qdev)
{
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
- ttm_bo_device_release(&qdev->mman.bdev);
+ ttm_device_fini(&qdev->mman.bdev);
DRM_INFO("qxl: ttm finalized\n");
}
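The TTM changes in this file are mechanical renames from the 5.13 TTM rework;
the mapping, as applied throughout this patch:

    /* struct ttm_bo_device      -> struct ttm_device
     * struct ttm_bo_driver      -> struct ttm_device_funcs
     * ttm_bo_device_init()      -> ttm_device_init()
     * ttm_bo_device_release()   -> ttm_device_fini()
     * ttm_bo_glob.lru_lock      -> ttm_glob.lru_lock
     */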