summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-07-20 20:34:23 +0100
committerChris Wilson <chris@chris-wilson.co.uk>2012-07-21 00:01:59 +0100
commita0d95a9c2d3a27eafbe459e2aefe772c006e596f (patch)
tree9bf923891902a73880e79d1ac392dfee195ae827
parentc52d265b83b033fb2a275fcc9a8a8d146e3afdf6 (diff)
sna: Only update a buffer when it becomes dirty
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--src/sna/gen2_render.c2
-rw-r--r--src/sna/gen3_render.c2
-rw-r--r--src/sna/gen4_render.c4
-rw-r--r--src/sna/gen5_render.c4
-rw-r--r--src/sna/gen6_render.c4
-rw-r--r--src/sna/gen7_render.c4
-rw-r--r--src/sna/kgem.c11
-rw-r--r--src/sna/kgem.h9
8 files changed, 21 insertions, 19 deletions
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 04c351c9..b65454d8 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -547,7 +547,7 @@ static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
assert(sna->render_state.gen2.vertex_offset == 0);
if (sna->render_state.gen2.target == op->dst.bo->unique_id) {
- kgem_bo_mark_dirty(op->dst.bo);
+ kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
return;
}
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 1f6c1aa8..18c5d852 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1373,7 +1373,7 @@ static void gen3_emit_target(struct sna *sna,
state->current_dst = bo->unique_id;
}
- kgem_bo_mark_dirty(bo);
+ kgem_bo_mark_dirty(&sna->kgem, bo);
}
static void gen3_emit_composite_state(struct sna *sna,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 6fcce712..de6c8c48 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -732,7 +732,7 @@ gen4_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(bo);
+ kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;
@@ -1457,7 +1457,7 @@ gen4_emit_state(struct sna *sna,
kgem_bo_is_dirty(op->mask.bo)));
OUT_BATCH(MI_FLUSH);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(op->dst.bo);
+ kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
}
}
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index d776e775..db7eb7b8 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -726,7 +726,7 @@ gen5_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(bo);
+ kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;
@@ -1472,7 +1472,7 @@ gen5_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
OUT_BATCH(MI_FLUSH);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(op->dst.bo);
+ kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
}
}
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d4783e06..c292da1e 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -914,7 +914,7 @@ gen6_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen6_emit_flush(sna);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(op->dst.bo);
+ kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
need_stall = false;
}
if (need_stall) {
@@ -1246,7 +1246,7 @@ gen6_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(bo);
+ kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c041d666..ae0aa9d3 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1048,7 +1048,7 @@ gen7_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen7_emit_pipe_invalidate(sna, need_stall);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(op->dst.bo);
+ kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
need_stall = false;
}
if (need_stall)
@@ -1355,7 +1355,7 @@ gen7_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(bo);
+ kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 66a23bf9..577fa6c0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -349,9 +349,10 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
assert(list_is_empty(&bo->vma));
bo->rq = NULL;
list_del(&bo->request);
+
+ bo->needs_flush = false;
}
- bo->needs_flush = false;
bo->domain = DOMAIN_NONE;
}
@@ -3494,12 +3495,8 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
kgem->reloc[index].target_handle = bo->handle;
kgem->reloc[index].presumed_offset = bo->presumed_offset;
- if (read_write_domain & 0x7fff) {
- DBG(("%s: marking handle=%d dirty\n",
- __FUNCTION__, bo->handle));
- bo->needs_flush = bo->dirty = true;
- list_move(&bo->request, &kgem->next_request->buffers);
- }
+ if (read_write_domain & 0x7fff)
+ kgem_bo_mark_dirty(kgem, bo);
delta += bo->presumed_offset;
} else {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 533a9196..165e7b96 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -536,10 +536,15 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
return bo->dirty;
}
-static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
+static inline void kgem_bo_mark_dirty(struct kgem *kgem, struct kgem_bo *bo)
{
+ if (bo->dirty)
+ return;
+
DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
- bo->dirty = true;
+
+ bo->needs_flush = bo->dirty = true;
+ list_move(&bo->request, &kgem->next_request->buffers);
}
void kgem_sync(struct kgem *kgem);