summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJerome Glisse <jglisse@redhat.com>2010-02-23 22:54:51 +0100
committerJerome Glisse <jglisse@redhat.com>2010-02-23 22:54:51 +0100
commita3aabf9877efbc9b4f208518e7707972deaa08eb (patch)
tree62f684033d366a87c213592c6938010514abd713
parenta6ac6494fd541818f26513dcfab68fcb4765b0b0 (diff)
cleanup the batch logic
-rw-r--r--r600_atom.c14
-rw-r--r--r600_batch.c208
2 files changed, 64 insertions, 158 deletions
diff --git a/r600_atom.c b/r600_atom.c
index ea04252..dc5cd8a 100644
--- a/r600_atom.c
+++ b/r600_atom.c
@@ -18,7 +18,7 @@
#include "r600d.h"
/*
- * drm_r600_framebuffer
+ * framebuffer
*/
int r600_framebuffer_emit(struct r600_winsys *rdev,
struct r600_atom *atom,
@@ -203,7 +203,7 @@ int r600_cb_cntl_create(struct r600_winsys *rdev, struct r600_atom *atom, void *
}
/*
- * r600_pa
+ * rasterizer
*/
int r600_rasterizer_create(struct r600_winsys *rdev, struct r600_atom *atom, void *data)
{
@@ -281,7 +281,7 @@ int r600_rasterizer_create(struct r600_winsys *rdev, struct r600_atom *atom, voi
}
/*
- * pipe_viewport_state
+ * viewport state
*/
int r600_viewport_create(struct r600_winsys *rdev, struct r600_atom *atom, void *data)
{
@@ -310,7 +310,7 @@ int r600_viewport_create(struct r600_winsys *rdev, struct r600_atom *atom, void
}
/*
- * pipe_scissor_state
+ * scissor state
*/
int r600_scissor_create(struct r600_winsys *rdev, struct r600_atom *atom, void *data)
{
@@ -331,7 +331,7 @@ int r600_scissor_create(struct r600_winsys *rdev, struct r600_atom *atom, void *
}
/*
- * r600_blend
+ * blend state
*/
int r600_blend_create(struct r600_winsys *rdev, struct r600_atom *atom, void *data)
{
@@ -439,7 +439,7 @@ int r600_dsa_create(struct r600_winsys *rdev, struct r600_atom *atom, void *data
}
/*
- * r600_vs_shader
+ * vertex shader
*/
int r600_vs_shader_emit(struct r600_winsys *rdev,
struct r600_atom *atom,
@@ -576,7 +576,7 @@ int r600_fs_shader_create(struct r600_winsys *rdev, struct r600_atom *atom, void
}
/*
- * vertex shader input
+ * shader resource
*/
int r600_shader_resource_emit(struct r600_winsys *rdev,
struct r600_atom *atom,
diff --git a/r600_batch.c b/r600_batch.c
index 3a904ce..4b933af 100644
--- a/r600_batch.c
+++ b/r600_batch.c
@@ -52,121 +52,91 @@ static void r600_emit_flush(struct r600_winsys *rdev,
RADEON_GEM_DOMAIN_GTT);
}
-static void r600_emit_resources(struct r600_winsys *rdev,
- struct radeon_ib *ib,
- struct radeon_bo *bo,
- u32 dw0, u32 dw1, u32 dw2, u32 dw3,
- u32 dw4, u32 dw5, u32 dw6, u32 dw7)
-{
- ib->ptr[ib->cpkts++] = PKT3(PKT3_SURFACE_SYNC, 3);
- ib->ptr[ib->cpkts++] = 0x01000000;
- ib->ptr[ib->cpkts++] = radeon_bo_size(bo) >> 8;
- ib->ptr[ib->cpkts++] = 0x00000000;
- ib->ptr[ib->cpkts++] = 0x0000000A;
- ib->ptr[ib->cpkts++] = PKT3(PKT3_NOP, 0);
- ib->ptr[ib->cpkts++] = radeon_ib_reloc(ib, bo, RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT);
- ib->ptr[ib->cpkts++] = PKT3(PKT3_SET_RESOURCE, 7);
- ib->ptr[ib->cpkts++] = dw0;
- ib->ptr[ib->cpkts++] = dw1;
- ib->ptr[ib->cpkts++] = dw2;
- ib->ptr[ib->cpkts++] = dw3;
- ib->ptr[ib->cpkts++] = dw4;
- ib->ptr[ib->cpkts++] = dw5;
- ib->ptr[ib->cpkts++] = dw6;
- ib->ptr[ib->cpkts++] = dw7;
- ib->ptr[ib->cpkts++] = PKT3(PKT3_NOP, 0);
- ib->ptr[ib->cpkts++] = radeon_ib_reloc(ib, bo, RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT);
-}
-
-/*
- * r600_batch
- */
-static int r600_batch_alloc(struct r600_batch **batch)
+void r600_atom_flush_add(struct r600_batch *batch, struct radeon_bo *bo, u32 flags)
{
- struct r600_batch *rbatch;
+ int i;
- *batch = NULL;
- rbatch = malloc(sizeof(*rbatch));
- if (rbatch == NULL)
- return -ENOMEM;
- INIT_LIST_HEAD(&rbatch->list);
- INIT_LIST_HEAD(&rbatch->pre_flushes);
- INIT_LIST_HEAD(&rbatch->post_flushes);
- rbatch->nemit_atoms = 0;
- *batch = rbatch;
- return 0;
+ for (i = 0; i < batch->nflush; i++) {
+ if (batch->flush[i].bo == bo) {
+ batch->flush[i].flags |= flags;
+ return;
+ }
+ }
+ batch->flush[batch->nflush].bo = bo;
+ batch->flush[batch->nflush].flags = flags;
+ batch->nflush++;
}
/*
* r600_batches
*/
-static void r600_batches_clear_locked(struct r600_winsys *rdev, struct r600_batches *batches)
+static void r600_batches_clear_locked(struct r600_winsys *rw, struct r600_batches *batches)
{
struct r600_batch *batch, *n;
- int i;
+ int i, j;
- list_for_each_entry_safe(batch, n, &batches->batches, list) {
- for (i = 0; i < R600_BATCH_NATOMS; i++) {
- if (batch->atoms[i])
- r600_atom_destroy(batch->atoms[i]);
+ for (i = 0; i < batches->nbatch; i++) {
+ for (j = 0; j < R600_BATCH_NATOMS; j++) {
+ r600_atom_destroy(batches->batch[i].atoms[j]);
}
- r600_atom_flush_cleanup(&batch->pre_flushes);
- r600_atom_flush_cleanup(&batch->post_flushes);
- list_del(&batch->list);
- free(batch);
}
- INIT_LIST_HEAD(&batches->batches);
- r700_batches_states_default(rdev, batches);
+ batches->ib->cpkts = 0;
+ batches->nbatch = 0;
+ r700_batches_states_default(rw, batches);
batches->npkts = batches->ib->cpkts;
}
-static int r600_batches_flush_locked(struct r600_winsys *rdev, struct r600_batches *batches)
+int r600_batches_flush(struct r600_winsys *rw)
{
- struct r600_batch *batch;
- struct r600_atom_flush *flush;
- int r, i;
+ struct r600_batches *batches = &rw->batches;
+ int r, i, j;
- list_for_each_entry(batch, &batches->batches, list) {
- list_for_each_entry(flush, &batch->pre_flushes, list) {
- r600_emit_flush(rdev, batches->ib, flush->bo, flush->flags);
+ for (i = 0; i < batches->nbatch; i++) {
+ for (j = 0; j < batches->batch[i].nflush; j++) {
+ r600_emit_flush(rw, batches->ib,
+ batches->batch[i].flush[j].bo,
+ batches->batch[i].flush[j].flags);
}
- for (i = 0; i < batch->nemit_atoms; i++) {
- r = batch->emit_atoms[i]->emit(rdev, batch->emit_atoms[i], batch, batches->ib);
+ for (j = 0; j < batches->batch[i].nemit_atoms; j++) {
+ r = batches->batch[i].emit_atoms[j]->emit(rw,
+ batches->batch[i].emit_atoms[j],
+ &batches->batch[i],
+ batches->ib);
if (r)
goto out_err;
}
- r = r600_draw_cmd_emit(batches->ib, &batch->drm);
+ r = r600_draw_cmd_emit(batches->ib, &batches->batch[i].drm);
/* flush + wait until */
batches->ib->ptr[batches->ib->cpkts++] = PKT3(PKT3_EVENT_WRITE, 0);
batches->ib->ptr[batches->ib->cpkts++] = 0x00000016;
batches->ib->ptr[batches->ib->cpkts++] = PKT3(PKT3_SET_CONFIG_REG, 1);
batches->ib->ptr[batches->ib->cpkts++] = 0x00000010;
batches->ib->ptr[batches->ib->cpkts++] = 0x00028000;
- list_for_each_entry(flush, &batch->post_flushes, list) {
- r600_emit_flush(rdev, batches->ib, flush->bo, flush->flags);
- }
}
- r = radeon_ib_schedule(rdev, batches->ib);
+ printf("ib %d dw\n", batches->ib->cpkts);
+ r = radeon_ib_schedule(rw, batches->ib);
out_err:
/* FIXME helper function */
batches->ib->cpkts = 0;
batches->ib->nrelocs = 0;
- r600_batches_clear_locked(rdev, batches);
+ r600_batches_clear_locked(rw, batches);
return r;
}
-int r600_batches_queue(struct r600_winsys *rdev, struct r600_request *rq)
+int r600_batches_queue(struct r600_winsys *rw, struct r600_request *rq)
{
struct drm_r600_batch *batch = rq->data;
struct r600_batch *rbatch;
- struct r600_batches *batches = &rdev->batches;
+ struct r600_batches *batches = &rw->batches;
int r, i, j;
- r = r600_batch_alloc(&rbatch);
- if (r)
- return r;
+ if (batches->nbatch >= R600_MAX_BATCH) {
+ r = r600_batches_flush(rw);
+ if (r)
+ return r;
+ }
+ rbatch = &batches->batch[batches->nbatch];
+ memset(rbatch, 0, sizeof(struct r600_batch));
i = 0;
if (batch->blend == NULL || batch->cb_cntl == NULL ||
batch->rasterizer == NULL || batch->viewport == NULL ||
@@ -194,9 +164,7 @@ int r600_batches_queue(struct r600_winsys *rdev, struct r600_request *rq)
}
memcpy(&rbatch->drm, batch, sizeof(struct drm_r600_batch));
reprocess:
- r600_atom_flush_cleanup(&rbatch->pre_flushes);
- r600_atom_flush_cleanup(&rbatch->post_flushes);
- rbatch->nflushes = 0;
+ rbatch->nflush = 0;
rbatch->npkts = 0;
/* flush + wait until = 5dw */
rbatch->npkts += 5;
@@ -204,18 +172,14 @@ reprocess:
for (i = 0; i < R600_BATCH_NATOMS; i++) {
if (rbatch->atoms[i]) {
for (j = 0; j < rbatch->atoms[i]->nbo; j++) {
- r = r600_atom_flush_add(&rbatch->pre_flushes, rbatch->atoms[i]->bo[j], rbatch->atoms[i]->flags[j]);
- if (r < 0)
- goto out_err;
+ r600_atom_flush_add(rbatch, rbatch->atoms[i]->bo[j], rbatch->atoms[i]->flags[j]);
}
- rbatch->nflushes += rbatch->atoms[i]->nbo;
rbatch->emit_atoms[rbatch->nemit_atoms++] = rbatch->atoms[i];
+ rbatch->npkts += rbatch->atoms[i]->npkts;
}
}
/* add flush */
- rbatch->npkts += rbatch->nflushes * 7;
- /* FIXME shader flush should be conditional only if we change shaders */
- rbatch->npkts += 7;
+ rbatch->npkts += rbatch->nflush * 7;
/* if batch is bigger than ib size it's an invalid one, this should
* not happen
*/
@@ -226,7 +190,7 @@ reprocess:
}
/* flush or not ? */
if (batches->npkts + rbatch->npkts > batches->ib->length_dw) {
- r = r600_batches_flush_locked(rdev, batches);
+ r = r600_batches_flush(rw);
if (r)
goto out_err;
goto reprocess;
@@ -237,9 +201,9 @@ reprocess:
batches->last_id[i] = rbatch->atoms[i]->id;
}
}
- r600_winsys_set_bo_list(rdev, rq->nbo, rq->bo);
+ printf("batch %d dw batches with %d dw\n", rbatch->npkts, batches->npkts);
batches->npkts += rbatch->npkts;
- list_add_tail(&rbatch->list, &batches->batches);
+ batches->nbatch++;
return 0;
out_err:
for (i = 0; i < R600_BATCH_NATOMS; i++) {
@@ -250,35 +214,25 @@ out_err:
return r;
}
-static int r600_batches_init(struct r600_winsys *rdev, struct r600_batches *batches)
+static int r600_batches_init(struct r600_winsys *rw, struct r600_batches *batches)
{
int r;
memset(batches, 0 , sizeof(struct r600_batches));
- INIT_LIST_HEAD(&batches->batches);
- r = radeon_ib_get(rdev, &batches->ib);
+ r = radeon_ib_get(rw, &batches->ib);
if (r)
return r;
- r700_batches_states_default(rdev, batches);
- batches->npkts = batches->ib->cpkts;
+ r600_batches_clear_locked(rw, batches);
return 0;
}
-static void r600_batches_cleanup_locked(struct r600_winsys *rdev, struct r600_batches *batches)
+static void r600_batches_cleanup_locked(struct r600_winsys *rw, struct r600_batches *batches)
{
- r600_batches_clear_locked(rdev, batches);
+ r600_batches_clear_locked(rw, batches);
radeon_ib_free(batches->ib);
batches->ib = NULL;
}
-int r600_batches_flush(struct r600_winsys *rdev)
-{
- int r;
-
- r = r600_batches_flush_locked(rdev, &rdev->batches);
- return r;
-}
-
int r600_atoms_init(struct r600_winsys *rdev)
{
rdev->npipes = 2;
@@ -354,60 +308,12 @@ struct r600_atom *r600_atom_create(struct r600_winsys *rdev, struct r600_request
return atom;
}
-void r600_atom_flush_cleanup(struct list_head *flushes)
-{
- struct r600_atom_flush *i, *n;
-
- list_for_each_entry_safe(i, n, flushes, list) {
- list_del(&i->list);
- free(i);
- }
-}
-
-int r600_atom_flush_add(struct list_head *flushes, struct radeon_bo *bo, u32 flags)
-{
- struct r600_atom_flush *i;
-
- list_for_each_entry(i, flushes, list) {
- if (i->bo->handle == bo->handle) {
- i->flags |= flags;
- return 0;
- }
- }
- i = malloc(sizeof(*i));
- if (i == NULL)
- return -ENOMEM;
- i->bo = bo;
- i->flags = flags;
- list_add_tail(&i->list, flushes);
- return 1;
-}
-
int r600_atom_emit_default(struct r600_winsys *rdev, struct r600_atom *atom,
void *data, struct radeon_ib *ib)
{
return radeon_ib_copy(ib, atom->pkts, atom->npkts);
}
-struct radeon_bo *radeon_bo_lookup(struct r600_winsys *rdev, u32 handle)
-{
- int i;
-
- for (i = 0; i < rdev->nbo; i++) {
- if (rdev->bo[i] && rdev->bo[i]->handle == handle) {
- radeon_bo_ref(rdev->bo[i]);
- return rdev->bo[i];
- }
- }
- return NULL;
-}
-
-void r600_winsys_set_bo_list(struct r600_winsys *rdev, u32 nbo, struct radeon_bo **bo)
-{
- memcpy(rdev->bo, bo, sizeof(void*) * nbo);
- rdev->nbo = nbo;
-}
-
void r600_atom_ref(struct r600_atom *atom)
{
if (atom == NULL)