author     Keith Whitwell <keith@tungstengraphics.com>   2007-03-15 11:22:38 +0000
committer  Keith Whitwell <keith@tungstengraphics.com>   2007-03-15 11:22:38 +0000
commit     891050ea33c71aec14bd65fc42eef95d4b459013 (patch)
tree       b47133a2f22c67ac87c6a6876dac46a77acc2829
parent     7d35d981653dd6c6cf51cd5e862b1afb5ae4c8e0 (diff)
parent     c5cf7073859dd91e3ff6a2a693eb347faf76dd49 (diff)

Merge branch 'index-swtnl-0.1-origin' into index-swtnl-0.1

Conflicts:
	src/mesa/drivers/dri/i915tex/i915_context.h
	src/mesa/drivers/dri/i915tex/i915_state.c
	src/mesa/drivers/dri/i915tex/i915_vtbl.c
	src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
-rw-r--r--  src/mesa/drivers/dri/Makefile.template                |    2
-rw-r--r--  src/mesa/drivers/dri/i915tex/i830_vtbl.c               |    2
-rw-r--r--  src/mesa/drivers/dri/i915tex/i915_state_fp.c           |   11
-rw-r--r--  src/mesa/drivers/dri/i915tex/i915_state_immediate.c    |    8
-rw-r--r--  src/mesa/drivers/dri/i915tex/i915_state_indirect.c     |   68
-rw-r--r--  src/mesa/drivers/dri/i915tex/i915_state_invarient.c    |   15
-rw-r--r--  src/mesa/drivers/dri/i915tex/i915_vtbl.c               |  205
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_batchbuffer.c       |  128
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_batchbuffer.h       |   91
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_context.c           |    2
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_idx_render.c        |  429
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_idx_render.h        |   37
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_state_callbacks.c   |    2
-rw-r--r--  src/mesa/drivers/dri/i915tex/intel_tris.c              |   17
14 files changed, 478 insertions, 539 deletions
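
The bulk of this merge replaces the single linear batchbuffer pointer with three fixed-size segments (immediate commands, dynamic indirect state, other indirect state) carved out of one buffer. Below is a minimal standalone sketch of that layout and the space accounting, assuming the SEGMENT_SZ/BATCH_RESERVED values from intel_batchbuffer.h further down; names mirror the driver's fields but the code is illustrative only.

#include <stdio.h>

#define SEGMENT_SZ      (32 * 1024)
#define BATCH_RESERVED  16

enum { SEG_IMMEDIATE, SEG_DYNAMIC_INDIRECT, SEG_OTHER_INDIRECT, NR_SEGMENTS };

struct segment_layout {
   unsigned start[NR_SEGMENTS];
   unsigned finish[NR_SEGMENTS];
   unsigned max[NR_SEGMENTS];
};

static void layout_init(struct segment_layout *l)
{
   for (int i = 0; i < NR_SEGMENTS; i++) {
      l->start[i] = l->finish[i] = i * SEGMENT_SZ;
      l->max[i] = (i + 1) * SEGMENT_SZ;
   }
   /* Only the immediate segment reserves tail space for the flush and
    * MI_BATCH_BUFFER_END dwords appended at flush time.
    */
   l->max[SEG_IMMEDIATE] -= BATCH_RESERVED;
}

/* Mirrors intel_batchbuffer_space(): free bytes left in a segment. */
static unsigned layout_space(const struct segment_layout *l, int seg)
{
   return l->max[seg] - l->finish[seg];
}

int main(void)
{
   struct segment_layout l;
   layout_init(&l);
   for (int i = 0; i < NR_SEGMENTS; i++)
      printf("segment %d: start 0x%05x  max 0x%05x  free %u bytes\n",
             i, l.start[i], l.max[i], layout_space(&l, i));
   return 0;
}
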
diff --git a/src/mesa/drivers/dri/Makefile.template b/src/mesa/drivers/dri/Makefile.template
index 5261a4b55d..883365094d 100644
--- a/src/mesa/drivers/dri/Makefile.template
+++ b/src/mesa/drivers/dri/Makefile.template
@@ -90,7 +90,7 @@ $(TOP)/$(LIB_DIR)/$(LIBNAME): $(LIBNAME)
depend: $(C_SOURCES) $(ASM_SOURCES) $(SYMLINKS)
touch depend
$(MKDEP) $(MKDEP_OPTIONS) $(DRIVER_DEFINES) $(INCLUDES) $(C_SOURCES) \
- $(ASM_SOURCES) 2>&1 /dev/null
+ $(ASM_SOURCES) 2> /dev/null
# Emacs tags
diff --git a/src/mesa/drivers/dri/i915tex/i830_vtbl.c b/src/mesa/drivers/dri/i915tex/i830_vtbl.c
index 5f1363caef..323c2db92c 100644
--- a/src/mesa/drivers/dri/i915tex/i830_vtbl.c
+++ b/src/mesa/drivers/dri/i915tex/i830_vtbl.c
@@ -426,7 +426,7 @@ i830_emit_state(struct intel_context *intel)
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
- intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
+ intel_batchbuffer_require_space(intel->batch, 0, get_state_size(state), 0);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
diff --git a/src/mesa/drivers/dri/i915tex/i915_state_fp.c b/src/mesa/drivers/dri/i915tex/i915_state_fp.c
index 0da54e1bf8..e8a768c2e3 100644
--- a/src/mesa/drivers/dri/i915tex/i915_state_fp.c
+++ b/src/mesa/drivers/dri/i915tex/i915_state_fp.c
@@ -74,6 +74,12 @@ static void i915_upload_fp( struct intel_context *intel )
OUT_BATCH( fp->program[i] );
ADVANCE_BATCH();
+
+#if 0
+ emit_indirect(intel, LI0_STATE_PROGRAM,
+ state->Program, state->ProgramSize * sizeof(GLuint));
+#endif
+
}
@@ -128,6 +134,11 @@ upload_constants(struct intel_context *intel)
ADVANCE_BATCH();
}
+
+#if 0
+ emit_indirect(intel, LI0_STATE_CONSTANTS,
+ state->Constant, state->ConstantSize * sizeof(GLuint));
+#endif
}
diff --git a/src/mesa/drivers/dri/i915tex/i915_state_immediate.c b/src/mesa/drivers/dri/i915tex/i915_state_immediate.c
index 7c8e4acf8d..e08748ad96 100644
--- a/src/mesa/drivers/dri/i915tex/i915_state_immediate.c
+++ b/src/mesa/drivers/dri/i915tex/i915_state_immediate.c
@@ -61,7 +61,7 @@ static void upload_S0S1( struct intel_context *intel )
OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(0) |
I1_LOAD_S(1) |
- 2);
+ 1);
/* INTEL_NEW_VBO, INTEL_NEW_RELOC */
OUT_RELOC(intel->state.vbo,
@@ -160,7 +160,7 @@ static void upload_S2S4(struct intel_context *intel)
OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(2) |
I1_LOAD_S(4) |
- 2);
+ 1);
OUT_BATCH(LIS2);
OUT_BATCH(LIS4);
ADVANCE_BATCH();
@@ -235,7 +235,7 @@ static void upload_S5( struct intel_context *intel )
BEGIN_BATCH(2, 0);
OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(5) |
- 1);
+ 0);
OUT_BATCH(LIS5);
ADVANCE_BATCH();
@@ -311,7 +311,7 @@ static void upload_S6( struct intel_context *intel )
BEGIN_BATCH(2, 0);
OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(6) |
- 1);
+ 0);
OUT_BATCH(LIS6);
ADVANCE_BATCH();
diff --git a/src/mesa/drivers/dri/i915tex/i915_state_indirect.c b/src/mesa/drivers/dri/i915tex/i915_state_indirect.c
index 4d199a139a..0445f8e6f7 100644
--- a/src/mesa/drivers/dri/i915tex/i915_state_indirect.c
+++ b/src/mesa/drivers/dri/i915tex/i915_state_indirect.c
@@ -4,6 +4,74 @@
* Need to figure out what STATIC vs DYNAMIC state is supposed to be.
*/
+
+
+static GLuint emit_indirect(struct intel_context *intel,
+ GLuint flag,
+ const GLuint *state,
+ GLuint size )
+{
+ GLuint delta;
+ GLuint segment;
+
+ switch (flag) {
+ case LI0_STATE_DYNAMIC_INDIRECT:
+ segment = SEGMENT_DYNAMIC_INDIRECT;
+
+ /* Dynamic indirect state is different - tell it the ending
+ * address, it will execute from either the previous end address
+ * or the beginning of the 4k page, depending on what it feels
+ * like.
+ */
+ delta = ((intel->batch->segment_finish_offset[segment] + size - 4) |
+ DIS0_BUFFER_VALID |
+ DIS0_BUFFER_RESET);
+
+
+ BEGIN_BATCH(2,0);
+ OUT_BATCH( _3DSTATE_LOAD_INDIRECT | flag | (1<<14) | 0);
+ OUT_RELOC( intel->batch->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE,
+ delta );
+ ADVANCE_BATCH();
+ break;
+
+ default:
+ segment = SEGMENT_OTHER_INDIRECT;
+
+ /* Other state is more conventional: tell the hardware the start
+ * point and size.
+ */
+ delta = (intel->batch->segment_finish_offset[segment] |
+ SIS0_FORCE_LOAD | /* XXX: fix me */
+ SIS0_BUFFER_VALID);
+
+ BEGIN_BATCH(3,0);
+ OUT_BATCH( _3DSTATE_LOAD_INDIRECT | flag | (1<<14) | 1);
+ OUT_RELOC( intel->batch->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE,
+ delta );
+ OUT_BATCH( (size/4)-1 );
+ ADVANCE_BATCH();
+
+
+ break;
+ }
+
+ {
+ GLuint offset = intel->batch->segment_finish_offset[segment];
+ intel->batch->segment_finish_offset[segment] += size;
+
+ if (state != NULL)
+ memcpy(intel->batch->map + offset, state, size);
+
+ return offset;
+ }
+}
+
+
/* "constant state", or constant-ish??
*/
static void emit_static_indirect_state()
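
For reference, a small standalone sketch of the two addressing conventions emit_indirect() distinguishes above: dynamic indirect state is described by its ending address (with reset/valid bits), while other indirect state is described by a start offset plus a length-minus-one dword. The bit values below are placeholders; the real DIS0 and SIS0 definitions live in the i915 register headers.

#include <stdio.h>

/* Placeholder bit values; see the i915 register headers for the real ones. */
#define DIS0_BUFFER_VALID  (1u << 0)
#define DIS0_BUFFER_RESET  (1u << 1)
#define SIS0_BUFFER_VALID  (1u << 0)
#define SIS0_FORCE_LOAD    (1u << 1)

/* Dynamic indirect state: the packet carries the ending address of the
 * new state; the hardware executes from the previous end (or the start
 * of the 4k page) up to it.
 */
static unsigned dynamic_delta(unsigned finish_offset, unsigned size)
{
   return (finish_offset + size - 4) | DIS0_BUFFER_VALID | DIS0_BUFFER_RESET;
}

/* Other indirect state: the packet carries the starting offset, and a
 * following dword gives the length in dwords minus one.
 */
static unsigned static_delta(unsigned finish_offset)
{
   return finish_offset | SIS0_FORCE_LOAD | SIS0_BUFFER_VALID;
}

static unsigned static_length_dword(unsigned size_bytes)
{
   return size_bytes / 4 - 1;
}

int main(void)
{
   printf("dynamic delta: 0x%08x\n", dynamic_delta(0x8000, 16));
   printf("static delta:  0x%08x  length dword: %u\n",
          static_delta(0x10000), static_length_dword(24));
   return 0;
}
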
diff --git a/src/mesa/drivers/dri/i915tex/i915_state_invarient.c b/src/mesa/drivers/dri/i915tex/i915_state_invarient.c
index befc12b76e..ecca70aad0 100644
--- a/src/mesa/drivers/dri/i915tex/i915_state_invarient.c
+++ b/src/mesa/drivers/dri/i915tex/i915_state_invarient.c
@@ -92,7 +92,7 @@ static void upload_invarient_state( struct intel_context *intel )
*/
(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(3) |
- (1)),
+ (0)),
(0),
(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT),
@@ -107,14 +107,17 @@ static void upload_invarient_state( struct intel_context *intel )
*/
(_3DSTATE_DEPTH_SUBRECT_DISABLE),
- /* Disable indirect state for now.
- */
- (_3DSTATE_LOAD_INDIRECT | 0),
- (0),
-
(_3DSTATE_BACKFACE_STENCIL_OPS | BFO_ENABLE_STENCIL_TWO_SIDE | 0)
};
+ /* Disable indirect state for now.
+ */
+ BEGIN_BATCH(2, 0);
+ OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+
+
{
GLuint i;
diff --git a/src/mesa/drivers/dri/i915tex/i915_vtbl.c b/src/mesa/drivers/dri/i915tex/i915_vtbl.c
index 35f8a80fb6..14b9c50ace 100644
--- a/src/mesa/drivers/dri/i915tex/i915_vtbl.c
+++ b/src/mesa/drivers/dri/i915tex/i915_vtbl.c
@@ -46,6 +46,211 @@
+#if 0
+
+
+
+#define OUT(x) do { \
+ if (0) _mesa_printf("OUT(0x%08x)\n", x); \
+ *p++ = (x); \
+} while(0)
+
+/* Push the state into the sarea and/or texture memory.
+ */
+static void
+i915_emit_state(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ struct i915_hw_state *state = i915->current;
+ GLuint dirty;
+ BATCH_LOCALS;
+
+ /* We don't hold the lock at this point, so want to make sure that
+ * there won't be a buffer wrap.
+ *
+ * It might be better to talk about explicit places where
+ * scheduling is allowed, rather than assume that it is whenever a
+ * batchbuffer fills up.
+ */
+ intel_batchbuffer_require_space(intel->batch, 0,
+ get_state_size(state), 0);
+
+ /* Do this here as we may have flushed the batchbuffer above,
+ * causing more state to be dirty!
+ */
+ dirty = get_dirty(state);
+
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
+
+ /* This should not change during a scene for HWZ, correct?
+ *
+ * If it does change, we probably have to flush everything and
+ * restart.
+ */
+ if (dirty & (I915_UPLOAD_INVARIENT | I915_UPLOAD_BUFFERS)) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_INVARIENT:\n");
+
+ i915_emit_invarient_state(intel);
+
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
+
+ /* Does this go in dynamic indirect state, or static indirect
+ * state???
+ */
+ BEGIN_BATCH(3, 0);
+ OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
+ OUT_RELOC(state->draw_region->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
+ state->draw_region->draw_offset);
+ ADVANCE_BATCH();
+
+ if (state->depth_region) {
+ BEGIN_BATCH(3, 0);
+ OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
+ OUT_RELOC(state->depth_region->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
+ state->depth_region->draw_offset);
+ ADVANCE_BATCH();
+ }
+
+ BEGIN_BATCH(2, 0);
+ OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
+ ADVANCE_BATCH();
+
+#if 0
+ /* Where does scissor go?
+ */
+ OUT_BATCH(state->Buffer[I915_DESTREG_SENABLE]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SR0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SR1]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SR2]);
+#endif
+ }
+
+ if (dirty & I915_UPLOAD_CTX) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_CTX:\n");
+
+ /* Immediate state: always goes in the batchbuffer.
+ */
+ BEGIN_BATCH(5, 0);
+ OUT_BATCH(state->Ctx[I915_CTXREG_LI]);
+ OUT_BATCH(state->Ctx[I915_CTXREG_LIS2]);
+ OUT_BATCH(state->Ctx[I915_CTXREG_LIS4]);
+ OUT_BATCH(state->Ctx[I915_CTXREG_LIS5]);
+ OUT_BATCH(state->Ctx[I915_CTXREG_LIS6]);
+ ADVANCE_BATCH();
+
+ emit_indirect(intel,
+ LI0_STATE_DYNAMIC_INDIRECT,
+ state->Ctx + I915_CTXREG_STATE4,
+ 4 * sizeof(GLuint) );
+ }
+
+
+ /* Combine all the dirty texture state into a single command to
+ * avoid lockups on I915 hardware.
+ */
+ if (dirty & I915_UPLOAD_TEX_ALL) {
+ GLuint offset;
+ GLuint *p;
+ int i, nr = 0;
+
+ for (i = 0; i < I915_TEX_UNITS; i++)
+ if (dirty & I915_UPLOAD_TEX(i))
+ nr++;
+
+ /* A bit of a nasty kludge so that we can setup the relocation
+ * information for the buffer address in the indirect state
+ * packet:
+ */
+ offset = emit_indirect(intel,
+ LI0_STATE_MAP,
+ NULL,
+ (2 + nr * 3) * sizeof(GLuint) );
+
+ p = (GLuint *)(intel->batch->map + offset);
+
+ OUT(_3DSTATE_MAP_STATE | (3 * nr));
+ OUT((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
+
+ for (i = 0; i < I915_TEX_UNITS; i++)
+ if (dirty & I915_UPLOAD_TEX(i)) {
+ if (state->tex_buffer[i]) {
+ intel_batchbuffer_set_reloc( intel->batch,
+ ((GLubyte *)p) - intel->batch->map,
+ state->tex_buffer[i],
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
+ state->tex_offset[i]);
+ OUT(0); /* placeholder */
+ }
+ else {
+ assert(i == 0);
+ assert(state == &i915->meta);
+ OUT(0);
+ }
+
+ OUT(state->Tex[i][I915_TEXREG_MS3]);
+ OUT(state->Tex[i][I915_TEXREG_MS4]);
+ }
+
+
+
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "UPLOAD SAMPLERS:\n");
+
+ offset = emit_indirect(intel,
+ LI0_STATE_SAMPLER,
+ NULL,
+ (2 + nr * 3) * sizeof(GLuint) );
+
+
+ p = (GLuint *)(intel->batch->map + offset);
+
+
+ OUT(_3DSTATE_SAMPLER_STATE | (3 * nr));
+ OUT((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
+ for (i = 0; i < I915_TEX_UNITS; i++) {
+ if (dirty & I915_UPLOAD_TEX(i)) {
+ OUT(state->Tex[i][I915_TEXREG_SS2]);
+ OUT(state->Tex[i][I915_TEXREG_SS3]);
+ OUT(state->Tex[i][I915_TEXREG_SS4]);
+ }
+ }
+ }
+
+ if (dirty & I915_UPLOAD_PROGRAM) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
+
+ assert((state->Program[0] & 0x1ff) + 2 == state->ProgramSize);
+
+ if (INTEL_DEBUG & DEBUG_STATE)
+ i915_disassemble_program(state->Program, state->ProgramSize);
+ }
+
+
+ if (dirty & I915_UPLOAD_CONSTANTS) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
+
+ }
+
+
+ state->emitted |= dirty;
+}
+
+#endif
+
static void
i915_destroy_context(struct intel_context *intel)
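
A small sketch of the packet sizing the (currently #if 0'd) i915_emit_state() above relies on: both the MAP_STATE and SAMPLER_STATE indirect blocks consist of a 2-dword header followed by 3 dwords per dirty texture unit. The I915_TEX_UNITS value here is a placeholder for the driver's own definition.

#include <stdio.h>

#define I915_TEX_UNITS 8   /* placeholder; the driver headers define the real value */

/* Count dirty texture units in a per-unit dirty bitmask. */
static unsigned count_dirty_units(unsigned dirty_mask)
{
   unsigned nr = 0;
   for (unsigned i = 0; i < I915_TEX_UNITS; i++)
      if (dirty_mask & (1u << i))
         nr++;
   return nr;
}

/* Size of a MAP_STATE or SAMPLER_STATE indirect block: a 2-dword header
 * plus 3 dwords per dirty unit, matching the (2 + nr * 3) * sizeof(GLuint)
 * allocations above.
 */
static unsigned tex_packet_bytes(unsigned nr_dirty)
{
   return (2 + nr_dirty * 3) * (unsigned) sizeof(unsigned);
}

int main(void)
{
   unsigned dirty = 0x5;   /* units 0 and 2 dirty */
   unsigned nr = count_dirty_units(dirty);
   printf("%u dirty units -> %u bytes of indirect state\n",
          nr, tex_packet_bytes(nr));
   return 0;
}
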
diff --git a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
index 9f6d82eef4..d63d9ba086 100644
--- a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
@@ -67,15 +67,48 @@
* server automatically waits on its own dma to complete before
* modifying cliprects ???
*/
-
-static void
-intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
+static void dump(GLuint offset, GLuint *ptr, GLuint count)
{
- int i;
- fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
- for (i = 0; i < count / 4; i += 4)
+ GLuint i;
+
+#if 0
+ for (i = 0; i < count; i += 4)
fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
+#else
+ for (i = 0; i < count; i++)
+ fprintf(stderr, "0x%x:\t0x%08x\n",
+ offset + i * 4, ptr[i]);
+#endif
+}
+
+
+static void
+intel_dump_batchbuffer(struct intel_batchbuffer *batch, GLubyte *map)
+{
+ GLuint *ptr = (GLuint *)map;
+ GLuint count = batch->segment_finish_offset[0];
+ GLuint buf0 = driBOOffset(batch->buffer);
+ GLuint buf = buf0;
+
+ fprintf(stderr, "\n\n\nIMMEDIATE: (%d)\n", count / 4);
+ dump( buf, ptr, count/4 );
+ fprintf(stderr, "END BATCH\n\n\n");
+
+ count = batch->segment_finish_offset[1] - batch->segment_start_offset[1];
+ ptr = (GLuint *)(map + batch->segment_start_offset[1]);
+ buf = buf0 + batch->segment_start_offset[1];
+
+ fprintf(stderr, "\n\n\nDYNAMIC: (%d)\n", count / 4);
+ dump( buf, ptr, count/4 );
+ fprintf(stderr, "END BATCH\n\n\n");
+
+ count = batch->segment_finish_offset[2] - batch->segment_start_offset[2];
+ ptr = (GLuint *)(map + batch->segment_start_offset[2]);
+ buf = buf0 + batch->segment_start_offset[2];
+
+ fprintf(stderr, "\n\n\nOTHER INDIRECT: (%d)\n", count / 4);
+ dump( buf, ptr, count/4 );
fprintf(stderr, "END BATCH\n\n\n");
}
@@ -100,7 +133,8 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
- driBOUnReference(r->buf);
+ if (r->buf != batch->buffer)
+ driBOUnReference(r->buf);
}
batch->list_count = 0;
@@ -119,7 +153,10 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
- batch->ptr = batch->map;
+
+ batch->segment_finish_offset[0] = batch->segment_start_offset[0];
+ batch->segment_finish_offset[1] = batch->segment_start_offset[1];
+ batch->segment_finish_offset[2] = batch->segment_start_offset[2];
}
/*======================================================================
@@ -137,6 +174,19 @@ intel_batchbuffer_alloc(struct intel_context *intel)
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
batch->last_fence = NULL;
driBOCreateList(20, &batch->list);
+
+ batch->segment_start_offset[0] = 0 * SEGMENT_SZ;
+ batch->segment_start_offset[1] = 1 * SEGMENT_SZ;
+ batch->segment_start_offset[2] = 2 * SEGMENT_SZ;
+
+ batch->segment_finish_offset[0] = 0 * SEGMENT_SZ;
+ batch->segment_finish_offset[1] = 1 * SEGMENT_SZ;
+ batch->segment_finish_offset[2] = 2 * SEGMENT_SZ;
+
+ batch->segment_max_offset[0] = 1 * SEGMENT_SZ - BATCH_RESERVED;
+ batch->segment_max_offset[1] = 2 * SEGMENT_SZ;
+ batch->segment_max_offset[2] = 3 * SEGMENT_SZ;
+
intel_batchbuffer_reset(batch);
return batch;
}
@@ -186,10 +236,14 @@ do_flush_locked(struct intel_batchbuffer *batch,
struct buffer_reloc *r = &batch->reloc[i];
ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
+
+ if (INTEL_DEBUG & DEBUG_BATCH)
+ _mesa_printf("reloc offset %x value 0x%x + 0x%x\n",
+ r->offset, driBOOffset(r->buf), r->delta);
}
- if (INTEL_DEBUG & DEBUG_BATCH)
- intel_dump_batchbuffer(0, ptr, used);
+ if (INTEL_DEBUG & DEBUG_BATCH)
+ intel_dump_batchbuffer(batch, ptr);
driBOUnmap(batch->buffer);
batch->map = NULL;
@@ -256,8 +310,9 @@ struct _DriFenceObject *
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
struct intel_context *intel = batch->intel;
- GLuint used = batch->ptr - batch->map;
+ GLuint used = batch->segment_finish_offset[0] - batch->segment_start_offset[0];
GLboolean was_locked = intel->locked;
+ GLint *ptr = (GLint *)(batch->map + batch->segment_finish_offset[0]);
if (used == 0)
return batch->last_fence;
@@ -269,19 +324,18 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
* performance drain that we would like to avoid.
*/
if (used & 4) {
- ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
- ((int *) batch->ptr)[1] = 0;
- ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
+ ptr[0] = intel->vtbl.flush_cmd();
+ ptr[1] = 0;
+ ptr[2] = MI_BATCH_BUFFER_END;
used += 12;
}
else {
- ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
- ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
+ ptr[0] = intel->vtbl.flush_cmd();
+ ptr[1] = MI_BATCH_BUFFER_END;
used += 8;
}
driBOUnmap(batch->buffer);
- batch->ptr = NULL;
batch->map = NULL;
/* TODO: Just pass the relocation list and dma buffer up to the
@@ -315,34 +369,54 @@ intel_batchbuffer_finish(struct intel_batchbuffer *batch)
/* This is the only way buffers get added to the validate list.
*/
GLboolean
-intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- struct _DriBufferObject *buffer,
- GLuint flags, GLuint mask, GLuint delta)
+intel_batchbuffer_set_reloc(struct intel_batchbuffer *batch,
+ GLuint offset,
+ struct _DriBufferObject *buffer,
+ GLuint flags, GLuint mask, GLuint delta)
{
assert(batch->nr_relocs < MAX_RELOCS);
+ assert((offset & 3) == 0);
- driBOAddListItem(&batch->list, buffer, flags, mask);
+ if (buffer != batch->buffer)
+ driBOAddListItem(&batch->list, buffer, flags, mask);
{
struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
- driBOReference(buffer);
+
+ if (buffer != batch->buffer)
+ driBOReference(buffer);
+
r->buf = buffer;
- r->offset = batch->ptr - batch->map;
+ r->offset = offset;
r->delta = delta;
}
- batch->ptr += 4;
return GL_TRUE;
}
+GLboolean
+intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ GLuint segment,
+ struct _DriBufferObject *buffer,
+ GLuint flags, GLuint mask, GLuint delta)
+{
+ intel_batchbuffer_set_reloc( batch,
+ batch->segment_finish_offset[segment],
+ buffer, flags, mask, delta );
+
+ batch->segment_finish_offset[segment] += 4;
+ return GL_TRUE;
+}
+
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ GLuint segment,
const void *data, GLuint bytes, GLuint flags)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(batch, bytes, flags);
- __memcpy(batch->ptr, data, bytes);
- batch->ptr += bytes;
+ intel_batchbuffer_require_space(batch, segment, bytes, flags);
+ __memcpy(batch->map + batch->segment_finish_offset[segment], data, bytes);
+ batch->segment_finish_offset[segment] += bytes;
}
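
A minimal sketch of the tail-padding rule intel_batchbuffer_flush() applies above: the immediate segment must end qword-aligned, so the flush command and MI_BATCH_BUFFER_END are padded with a noop dword when the used size is only dword-aligned. The command encodings below are placeholders for the real MI_* definitions.

#include <stdio.h>

/* Placeholder encodings; the real values come from the driver headers. */
#define MI_FLUSH             0x02000000u
#define MI_BATCH_BUFFER_END  0x0A000000u

/* Append the flush command and end-of-batch marker at ptr and return the
 * new used size, padding with a noop dword when the batch is dword- but
 * not qword-aligned (the used & 4 test above).
 */
static unsigned close_batch(unsigned *ptr, unsigned used)
{
   if (used & 4) {
      ptr[0] = MI_FLUSH;
      ptr[1] = 0;                  /* noop pad to reach qword alignment */
      ptr[2] = MI_BATCH_BUFFER_END;
      return used + 12;
   } else {
      ptr[0] = MI_FLUSH;
      ptr[1] = MI_BATCH_BUFFER_END;
      return used + 8;
   }
}

int main(void)
{
   unsigned tail[3];
   printf("used 0x24 -> 0x%x\n", close_batch(tail, 0x24));
   printf("used 0x20 -> 0x%x\n", close_batch(tail, 0x20));
   return 0;
}
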
diff --git a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
index cae24c3893..7bf705e907 100644
--- a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
@@ -9,7 +9,8 @@ struct intel_context;
/* Must be able to hold at minimum VB->Size * 3 * 2 bytes for
* intel_idx_render.c indices, which is currently about 20k.
*/
-#define BATCH_SZ (64*1024)
+#define BATCH_SZ (3*32*1024)
+#define SEGMENT_SZ (32*1024)
#define BATCH_RESERVED 16
#define MAX_RELOCS 400
@@ -24,6 +25,13 @@ struct buffer_reloc
GLuint delta; /* not needed? */
};
+enum {
+ SEGMENT_IMMEDIATE = 0,
+ SEGMENT_DYNAMIC_INDIRECT = 1,
+ SEGMENT_OTHER_INDIRECT = 2,
+ NR_SEGMENTS = 3
+};
+
struct intel_batchbuffer
{
struct bufmgr *bm;
@@ -36,11 +44,18 @@ struct intel_batchbuffer
drmBOList list;
GLuint list_count;
GLubyte *map;
- GLubyte *ptr;
struct buffer_reloc reloc[MAX_RELOCS];
GLuint nr_relocs;
GLuint size;
+
+ /* Put all the different types of packets into one buffer for
+ * easier validation. This will have to change, but for now it is
+ * enough to get started.
+ */
+ GLuint segment_start_offset[NR_SEGMENTS];
+ GLuint segment_finish_offset[NR_SEGMENTS];
+ GLuint segment_max_offset[NR_SEGMENTS];
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
@@ -62,12 +77,21 @@ void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ GLuint segment,
const void *data, GLuint bytes, GLuint flags);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
+ GLuint segment,
GLuint bytes);
+GLboolean
+intel_batchbuffer_set_reloc(struct intel_batchbuffer *batch,
+ GLuint offset,
+ struct _DriBufferObject *buffer,
+ GLuint flags, GLuint mask, GLuint delta);
+
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ GLuint segment,
struct _DriBufferObject *buffer,
GLuint flags,
GLuint mask, GLuint offset);
@@ -78,27 +102,35 @@ GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
* work...
*/
static INLINE GLuint
-intel_batchbuffer_space(struct intel_batchbuffer *batch)
+intel_batchbuffer_space(struct intel_batchbuffer *batch,
+ GLuint segment)
{
- return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
+ return (batch->segment_max_offset[segment] -
+ batch->segment_finish_offset[segment]);
}
static INLINE void
-intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch,
+ GLuint segment,
+ GLuint dword)
{
assert(batch->map);
- assert(intel_batchbuffer_space(batch) >= 4);
- *(GLuint *) (batch->ptr) = dword;
- batch->ptr += 4;
+ assert(intel_batchbuffer_space(batch, segment) >= 4);
+ *(GLuint *) (batch->map + batch->segment_finish_offset[segment]) = dword;
+ batch->segment_finish_offset[segment] += 4;
}
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
+ GLuint segment,
GLuint sz, GLuint flags)
{
- assert(sz < batch->size - 8);
- if (intel_batchbuffer_space(batch) < sz ||
+ /* XXX: need to figure out flushing, etc.
+ */
+ assert(sz < SEGMENT_SZ);
+
+ if (intel_batchbuffer_space(batch, segment) < sz ||
(batch->flags != 0 && flags != 0 && batch->flags != flags))
intel_batchbuffer_flush(batch);
@@ -109,33 +141,44 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
*/
#define BATCH_LOCALS
-#define BEGIN_BATCH(n, flags) do { \
+
+/* Hack for indirect emit:
+ */
+#define BEGIN_BATCH_SEGMENT(seg, n, flags) do { \
assert(!intel->prim.flush); \
- intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
- _mesa_printf("BEGIN_BATCH(%d,%d) in %s\n", n, flags, __FUNCTION__); \
+ intel_batchbuffer_require_space(intel->batch, seg, (n)*4, flags); \
+ if (0) _mesa_printf("BEGIN_BATCH(%d,%d,%d) in %s\n", seg, n, flags, __FUNCTION__); \
} while (0)
-#define OUT_BATCH(d) do { \
- _mesa_printf("OUT_BATCH(0x%08x)\n", d); \
- intel_batchbuffer_emit_dword(intel->batch, d); \
+#define OUT_BATCH_SEGMENT(seg, d) do { \
+ if (0) _mesa_printf("OUT_BATCH(%d, 0x%08x)\n", seg, d); \
+ intel_batchbuffer_emit_dword(intel->batch, seg, d); \
} while (0)
-#define OUT_BATCH_F(fl) do { \
+#define OUT_BATCH_F_SEGMENT(seg, fl) do { \
fi_type fi; \
fi.f = fl; \
- _mesa_printf("OUT_BATCH(0x%08x)\n", fi.i); \
- intel_batchbuffer_emit_dword(intel->batch, fi.i); \
+ if (0) _mesa_printf("OUT_BATCH(%d, 0x%08x)\n", seg, fi.i); \
+ intel_batchbuffer_emit_dword(intel->batch, seg, fi.i); \
} while (0)
-#define OUT_RELOC(buf,flags,mask,delta) do { \
+#define OUT_RELOC_SEGMENT(seg, buf,flags,mask,delta) do { \
assert((delta) >= 0); \
- _mesa_printf("OUT_RELOC( buf %p offset %x )\n", buf, delta); \
- intel_batchbuffer_emit_reloc(intel->batch, buf, flags, mask, delta); \
+ if (0) _mesa_printf("OUT_RELOC( seg %d buf %p offset %x )\n", seg, buf, delta); \
+ intel_batchbuffer_emit_reloc(intel->batch, seg, buf, flags, mask, delta); \
} while (0)
-#define ADVANCE_BATCH() do { \
- _mesa_printf("ADVANCE_BATCH()\n"); \
+#define ADVANCE_BATCH_SEGMENT(seg) do { \
+ if (0) _mesa_printf("ADVANCE_BATCH()\n"); \
} while(0)
+#define BEGIN_BATCH(n, flags) BEGIN_BATCH_SEGMENT(0, n, flags)
+#define OUT_BATCH(d) OUT_BATCH_SEGMENT(0, d)
+#define OUT_BATCH_F(fl) OUT_BATCH_F_SEGMENT(0, fl)
+#define OUT_RELOC(buf,flags,mask,delta) OUT_RELOC_SEGMENT(0,buf,flags,mask, delta)
+#define ADVANCE_BATCH() ADVANCE_BATCH_SEGMENT(0)
+
+
+
#endif
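
A compile-on-its-own sketch of the macro layering introduced above: the legacy BEGIN_BATCH/OUT_BATCH/ADVANCE_BATCH names simply forward to the new *_SEGMENT variants with segment 0 (immediate). The batch storage is mocked with a plain array standing in for intel->batch, so the real space checks and dword emission paths are elided.

#include <stdio.h>

/* Mock batch storage; the real macros go through
 * intel_batchbuffer_emit_dword() and the per-segment finish offsets.
 */
static unsigned batch[16];
static unsigned batch_used;

#define BEGIN_BATCH_SEGMENT(seg, n, flags)  ((void) (seg))   /* space check elided */
#define OUT_BATCH_SEGMENT(seg, d)           (batch[batch_used++] = (d))
#define ADVANCE_BATCH_SEGMENT(seg)          ((void) (seg))

/* The legacy names forward to segment 0 (immediate), as in the header. */
#define BEGIN_BATCH(n, flags)  BEGIN_BATCH_SEGMENT(0, n, flags)
#define OUT_BATCH(d)           OUT_BATCH_SEGMENT(0, d)
#define ADVANCE_BATCH()        ADVANCE_BATCH_SEGMENT(0)

int main(void)
{
   BEGIN_BATCH(2, 0);
   OUT_BATCH(0x7d050000u);   /* placeholder command dword */
   OUT_BATCH(0);
   ADVANCE_BATCH();
   printf("emitted %u dwords into the immediate segment\n", batch_used);
   return 0;
}
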
diff --git a/src/mesa/drivers/dri/i915tex/intel_context.c b/src/mesa/drivers/dri/i915tex/intel_context.c
index f2c009b75c..ad4b9b0053 100644
--- a/src/mesa/drivers/dri/i915tex/intel_context.c
+++ b/src/mesa/drivers/dri/i915tex/intel_context.c
@@ -277,7 +277,7 @@ intelFlush(GLcontext * ctx)
INTEL_FIREVERTICES(intel);
- if (intel->batch->map != intel->batch->ptr)
+ if (intel->batch->segment_finish_offset[0] != 0)
intel_batchbuffer_flush(intel->batch);
/* XXX: Need to do an MI_FLUSH here.
diff --git a/src/mesa/drivers/dri/i915tex/intel_idx_render.c b/src/mesa/drivers/dri/i915tex/intel_idx_render.c
deleted file mode 100644
index 8cf0adbce6..0000000000
--- a/src/mesa/drivers/dri/i915tex/intel_idx_render.c
+++ /dev/null
@@ -1,429 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/*
- * Render vertex buffers by emitting vertices directly to dma buffers.
- */
-#include "glheader.h"
-#include "context.h"
-#include "macros.h"
-#include "imports.h"
-#include "mtypes.h"
-#include "enums.h"
-
-#include "tnl/t_context.h"
-#include "tnl/t_vertex.h"
-
-#include "intel_screen.h"
-#include "intel_context.h"
-#include "intel_tris.h"
-#include "intel_batchbuffer.h"
-#include "intel_buffer_objects.h"
-#include "intel_reg.h"
-#include "intel_idx_render.h"
-
-
-#define FILE_DEBUG_FLAG DEBUG_IDX
-
-
-
-#define MAX_VBO 32 /* XXX: make dynamic */
-#define VBO_SIZE (128*1024)
-
-
-/* Basically limited to what is addressable by the 16-bit indices,
- * and the number of indices we can fit in a batchbuffer after
- * making room for state.
- */
-#define HW_MAX_INDEXABLE_VERTS 0xfffe
-#define HW_MAX_INDICES ((BATCH_SZ - 1024) / 2)
-
-
-
-struct intel_vb {
- struct intel_context *intel;
-
- struct intel_buffer_object *vbo[MAX_VBO];
- GLuint nr_vbo;
-
- struct intel_buffer_object *current_vbo;
-
- GLuint current_vbo_size;
- GLuint current_vbo_used;
- void *current_vbo_ptr;
-
- GLuint hw_vbo_offset;
- GLuint hw_vbo_delta;
- GLuint vertex_size;
-
- GLuint dirty;
-};
-
-
-/* Have to fallback for:
- * - points (needs different viewport)
- * - twoside light
- * - z offset
- * - unfilled prims
- * - lines && linestipple
- * - tris && tristipple && !hwstipple
- * - point attenuation (bug!)
- * - aa tris && strict-conformance
- * - aa points && strict-conformance
- * - PLUS: any fallback-to-swrast condition (intel->Fallback)
- *
- * If binning, need to flush bins and fall
- */
-static GLboolean check_idx_render(GLcontext *ctx,
- struct vertex_buffer *VB,
- GLuint *max_nr_verts,
- GLuint *max_nr_indices )
-
-{
- struct intel_context *intel = intel_context(ctx);
- GLuint i;
-
- if (intel->Fallback != 0 ||
- intel->RenderIndex != 0)
- return GL_FALSE;
-
- /* These are constant, but for some hardware they might vary
- * depending on the state, eg. according to vertex size.
- */
- *max_nr_verts = HW_MAX_INDEXABLE_VERTS;
- *max_nr_indices = HW_MAX_INDICES;
-
- /* Fix points with different dstorg bias state?? or use different
- * viewport transform in this case only (requires flush at level
- * above).
- */
- for (i = 0; i < VB->PrimitiveCount; i++) {
- if (VB->Primitive[i].mode == GL_POINTS)
- return GL_FALSE;
- }
-
- return GL_TRUE;
-}
-
-static void
-emit_vb_state( struct intel_vb *vb )
-{
- struct intel_context *intel = vb->intel;
-
- DBG("%s\n", __FUNCTION__);
-
- intel->state.vbo = vb->current_vbo->buffer;
- intel->state.vbo_offset = vb->hw_vbo_offset;
- intel->state.dirty.intel |= INTEL_NEW_VBO;
-
- vb->dirty = 0;
-}
-
-static void
-release_current_vbo( struct intel_vb *vb )
-{
- GLcontext *ctx = &vb->intel->ctx;
-
- DBG("%s\n", __FUNCTION__);
-
- if (vb->current_vbo_ptr)
- ctx->Driver.UnmapBuffer( ctx,
- GL_ARRAY_BUFFER_ARB,
- &vb->current_vbo->Base );
-
- vb->current_vbo = NULL;
- vb->current_vbo_ptr = NULL;
- vb->current_vbo_size = 0;
- vb->current_vbo_used = 0;
- vb->dirty = 0;
-}
-
-static void
-reset_vbo( struct intel_vb *vb )
-{
- DBG("%s\n", __FUNCTION__);
-
- if (vb->current_vbo)
- release_current_vbo( vb );
-
- vb->nr_vbo = 0;
-}
-
-
-
-
-static void
-get_next_vbo( struct intel_vb *vb, GLuint size )
-{
- GLcontext *ctx = &vb->intel->ctx;
-
- DBG("%s\n", __FUNCTION__);
-
- /* XXX: just allocate more vbos here:
- */
- if (vb->nr_vbo == MAX_VBO) {
- DBG("XXX: out of vbo's, flushing\n");
- INTEL_FIREVERTICES(vb->intel);
- intel_batchbuffer_flush(vb->intel->batch);
- reset_vbo(vb);
- }
-
- /* Unmap current vbo:
- */
- if (vb->current_vbo)
- release_current_vbo( vb );
-
- if (size < VBO_SIZE)
- size = VBO_SIZE;
-
- vb->current_vbo = vb->vbo[vb->nr_vbo++];
- vb->current_vbo_size = size;
- vb->current_vbo_used = 0;
- vb->hw_vbo_offset = 0;
- vb->dirty = 1;
-
- /* Clear out buffer contents and break any hardware dependency on
- * the old memory:
- */
- ctx->Driver.BufferData( ctx,
- GL_ARRAY_BUFFER_ARB,
- vb->current_vbo_size,
- NULL,
- GL_DYNAMIC_DRAW_ARB,
- &vb->current_vbo->Base );
-
-}
-
-static void *get_space( struct intel_vb *vb, GLuint nr, GLuint vertex_size )
-{
- GLcontext *ctx = &vb->intel->ctx;
- void *ptr;
- GLuint space = nr * vertex_size * 4;
-
- DBG("%s %d*%d, vbo %d\n", __FUNCTION__, nr, vertex_size, vb->nr_vbo);
-
- if (vb->current_vbo_used + space > vb->current_vbo_size || !vb->current_vbo_ptr)
- get_next_vbo( vb, space );
-
- if (vb->vertex_size != vertex_size) {
- vb->vertex_size = vertex_size;
- vb->hw_vbo_offset = vb->current_vbo_used;
- vb->dirty = 1;
- }
-
- if (!vb->current_vbo_ptr) {
- DBG("%s map vbo %d\n", __FUNCTION__, vb->nr_vbo);
-
- /* Map the vbo now, will be unmapped in release_current_vbo, above.
- */
- vb->current_vbo_ptr = ctx->Driver.MapBuffer( ctx,
- GL_ARRAY_BUFFER_ARB,
- GL_WRITE_ONLY,
- &vb->current_vbo->Base );
- }
-
-
- /* Hmm, could just re-emit the vertex buffer packet & avoid this:
- */
- vb->hw_vbo_delta = (vb->current_vbo_used - vb->hw_vbo_offset) / (vb->vertex_size * 4);
-
- ptr = vb->current_vbo_ptr + vb->current_vbo_used;
- vb->current_vbo_used += space;
-
- return ptr;
-}
-
-
-static void
-build_and_emit_vertices(GLcontext * ctx, GLuint nr)
-{
- struct intel_context *intel = intel_context(ctx);
- TNLcontext *tnl = TNL_CONTEXT(ctx);
- void *ptr = get_space(intel->vb, nr, intel->vertex_size );
-
- DBG("%s %d\n", __FUNCTION__, nr);
-
- assert(tnl->clipspace.vertex_size == intel->vertex_size * 4);
-
- tnl->clipspace.new_inputs |= VERT_BIT_POS;
- _tnl_emit_vertices_to_buffer( ctx, 0, nr, ptr );
-}
-
-/* Emits vertices previously built by a call to BuildVertices.
- *
- * XXX: have t_vertex.c use our buffer to build into and avoid the
- * copy (assuming our buffer is cached...)
- */
-static void emit_built_vertices( GLcontext *ctx, GLuint nr )
-{
- struct intel_context *intel = intel_context(ctx);
- void *ptr = get_space(intel->vb, nr, intel->vertex_size );
-
- DBG("%s %d\n", __FUNCTION__, nr);
-
- memcpy(ptr, _tnl_get_vertex(ctx, 0),
- nr * intel->vertex_size * sizeof(GLuint));
-}
-
-/* Emit primitives and indices referencing the previously emitted
- * vertex buffer.
- */
-static void emit_prims( GLcontext *ctx,
- const struct _mesa_prim *prim,
- GLuint nr_prims,
- const GLuint *indices,
- GLuint nr_indices )
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_vb *vb = intel->vb;
- GLuint i, j;
-
- assert(indices);
-
- DBG("%s - start\n", __FUNCTION__);
-
-
- for (i = 0; i < nr_prims; i++) {
- GLuint nr, hw_prim;
- GLuint start = prim[i].start;
- GLuint offset = vb->hw_vbo_delta;
-
- switch (prim[i].mode) {
- case GL_TRIANGLES:
- hw_prim = PRIM3D_TRILIST;
- nr = prim[i].count - prim[i].count % 3;
- break;
- case GL_LINES:
- hw_prim = PRIM3D_LINELIST;
- nr = prim[i].count - prim[i].count % 2;
- break;
- default:
- assert(0);
- continue;
- }
-
- if (nr == 0)
- continue;
-
- /* XXX: Need to ensure that both the state and the primitive
- * command below end up in the same batchbuffer, otherwise there
- * is a risk that another context might interpose a batchbuffer
- * containing different statesetting commands. Using logical
- * contexts would fix this, as would the BRW scheme of only
- * emitting batch commands while holding the lock.
- */
- if (vb->dirty)
- emit_vb_state( vb );
-
- intel_emit_state(intel);
-
- /* XXX: Can emit upto 64k indices, need to split larger prims
- */
- BEGIN_BATCH(2 + (nr+1)/2, INTEL_BATCH_CLIPRECTS);
-
- OUT_BATCH(0);
- OUT_BATCH( _3DPRIMITIVE |
- hw_prim |
- PRIM_INDIRECT |
- PRIM_INDIRECT_ELTS |
- nr );
-
- /* Pack indices into 16bits
- */
- for (j = 0; j < nr-1; j += 2) {
- OUT_BATCH( (offset + indices[start+j]) | ((offset + indices[start+j+1])<<16) );
- }
-
- if (j < nr)
- OUT_BATCH( (offset + indices[start+j]) );
-
- ADVANCE_BATCH();
- }
-
-
- DBG("%s - done\n", __FUNCTION__);
-}
-
-
-/* Callback from (eventually) intel_batchbuffer_flush()
- */
-void intel_idx_lost_hardware( struct intel_context *intel )
-{
- GLcontext *ctx = &intel->ctx;
- struct intel_vb *vb = intel->vb;
-
- DBG("%s\n", __FUNCTION__);
-
- if (vb->current_vbo_ptr) {
- ctx->Driver.UnmapBuffer( ctx,
- GL_ARRAY_BUFFER_ARB,
- &vb->current_vbo->Base );
- vb->current_vbo_ptr = NULL;
- }
-}
-
-void intel_idx_init( struct intel_context *intel )
-{
- GLcontext *ctx = &intel->ctx;
- TNLcontext *tnl = TNL_CONTEXT(ctx);
- GLuint i;
-
- tnl->Driver.Render.CheckIdxRender = check_idx_render;
- tnl->Driver.Render.BuildAndEmitVertices = build_and_emit_vertices;
- tnl->Driver.Render.EmitBuiltVertices = emit_built_vertices;
- tnl->Driver.Render.EmitPrims = emit_prims;
-
-
- /* Create the vb context:
- */
- intel->vb = CALLOC_STRUCT( intel_vb );
- intel->vb->intel = intel;
-
- for (i = 0; i < MAX_VBO; i++) {
- intel->vb->vbo[i] = (struct intel_buffer_object *)
- ctx->Driver.NewBufferObject(ctx, 1, GL_ARRAY_BUFFER_ARB);
- }
-}
-
-void intel_idx_destroy( struct intel_context *intel )
-{
- GLcontext *ctx = &intel->ctx;
- struct intel_vb *vb = intel->vb;
- GLuint i;
-
- if (vb) {
- reset_vbo( vb );
-
- /* Destroy the vbo:
- */
- for (i = 0; i < MAX_VBO; i++)
- if (vb->vbo[i])
- ctx->Driver.DeleteBuffer( ctx, &vb->vbo[i]->Base );
-
- FREE( intel->vb );
- }
-}
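
For reference, a standalone sketch of the 16-bit index packing the removed emit_prims() performed: two indices per output dword, low halfword first, with a trailing odd index left in the low halfword of a final dword.

#include <stdint.h>
#include <stdio.h>

/* Pack 32-bit element indices into 16-bit pairs, two per dword, adding
 * the vertex-buffer offset to each. Returns the number of dwords written.
 */
static unsigned pack_indices(uint32_t *out, const uint32_t *indices,
                             unsigned nr, uint32_t offset)
{
   unsigned j, n = 0;
   for (j = 0; j + 1 < nr; j += 2)
      out[n++] = (offset + indices[j]) | ((offset + indices[j + 1]) << 16);
   if (j < nr)
      out[n++] = offset + indices[j];   /* trailing odd index */
   return n;
}

int main(void)
{
   uint32_t idx[5] = { 0, 1, 2, 2, 3 };
   uint32_t out[3];
   unsigned n = pack_indices(out, idx, 5, 0);
   printf("%u dwords: 0x%08x 0x%08x 0x%08x\n", n, out[0], out[1], out[2]);
   return 0;
}
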
diff --git a/src/mesa/drivers/dri/i915tex/intel_idx_render.h b/src/mesa/drivers/dri/i915tex/intel_idx_render.h
deleted file mode 100644
index 6639bf5507..0000000000
--- a/src/mesa/drivers/dri/i915tex/intel_idx_render.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef INTEL_IDX_H
-#define INTEL_IDX_H
-
-#include "intel_context.h"
-
-void intel_idx_init( struct intel_context *intel );
-void intel_idx_destroy( struct intel_context *intel );
-void intel_idx_lost_hardware( struct intel_context *intel );
-
-#endif
diff --git a/src/mesa/drivers/dri/i915tex/intel_state_callbacks.c b/src/mesa/drivers/dri/i915tex/intel_state_callbacks.c
index 86b89fcdaf..5c1f994c68 100644
--- a/src/mesa/drivers/dri/i915tex/intel_state_callbacks.c
+++ b/src/mesa/drivers/dri/i915tex/intel_state_callbacks.c
@@ -86,8 +86,6 @@ static void update_viewport( struct intel_context *intel )
const GLfloat depthScale = 1.0F / DrawBuffer->_DepthMaxF;
GLfloat *m = intel->ViewportMatrix.m;
- _mesa_printf("depth scale %f\n", depthScale);
-
m[MAT_SX] = v[MAT_SX];
m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X;
diff --git a/src/mesa/drivers/dri/i915tex/intel_tris.c b/src/mesa/drivers/dri/i915tex/intel_tris.c
index 795f7f66c4..1341cb197c 100644
--- a/src/mesa/drivers/dri/i915tex/intel_tris.c
+++ b/src/mesa/drivers/dri/i915tex/intel_tris.c
@@ -57,7 +57,7 @@ static void intelRasterPrimitive(GLcontext * ctx, GLenum rprim,
static void
intel_flush_inline_primitive(struct intel_context *intel)
{
- GLuint used = intel->batch->ptr - intel->prim.start_ptr;
+ GLuint used = intel->batch->segment_finish_offset[0];
assert(intel->prim.primitive != ~0);
@@ -72,7 +72,7 @@ intel_flush_inline_primitive(struct intel_context *intel)
goto finished;
do_discard:
- intel->batch->ptr -= used;
+ intel->batch->segment_finish_offset[0] -= used;
finished:
intel->prim.primitive = ~0;
@@ -96,7 +96,8 @@ intelStartInlinePrimitive(struct intel_context *intel,
* be emitted to a batchbuffer missing the required full-state
* preamble.
*/
- if (intel_batchbuffer_space(intel->batch) < 100) {
+ if (intel_batchbuffer_space(intel->batch, 0) < 100) {
+ assert(0); /* XXX: later! */
intel_batchbuffer_flush(intel->batch);
intel_emit_state(intel);
}
@@ -109,7 +110,7 @@ intelStartInlinePrimitive(struct intel_context *intel,
BEGIN_BATCH(2, batch_flags);
OUT_BATCH(0);
- intel->prim.start_ptr = intel->batch->ptr;
+ intel->prim.start_ptr = intel->batch->map + intel->batch->segment_start_offset[0];
intel->prim.primitive = prim;
intel->prim.flush = intel_flush_inline_primitive;
@@ -139,15 +140,17 @@ intelExtendInlinePrimitive(struct intel_context *intel, GLuint dwords)
assert(intel->prim.flush == intel_flush_inline_primitive);
- if (intel_batchbuffer_space(intel->batch) < sz)
+ if (intel_batchbuffer_space(intel->batch, 0) < sz) {
+ assert(0); /* XXX: later */
intelWrapInlinePrimitive(intel);
+ }
/* _mesa_printf("."); */
assert(intel->state.dirty.intel == 0);
- ptr = (GLuint *) intel->batch->ptr;
- intel->batch->ptr += sz;
+ ptr = (GLuint *) (intel->batch->map + intel->batch->segment_finish_offset[0]);
+ intel->batch->segment_finish_offset[0] += sz;
return ptr;
}