author    Mark Mueller <MarkKMueller@gmail.com> 2013-12-02 17:08:00 -0800
committer Mark Mueller <MarkKMueller@gmail.com> 2014-01-13 09:44:31 -0800
commit    38112c74d961fd991e4c6f27954a4223116f9539 (patch)
tree      c2e55bce8012f0802ea31a8e79ae341dd4b9d051
parent    5fa2292d4d19a008cefc3dd4b9f14451ce08bb03 (diff)
Change handling of Z16 depth textures
-rw-r--r-- src/mesa/drivers/dri/i965/brw_surface_formats.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_surface_formats.c b/src/mesa/drivers/dri/i965/brw_surface_formats.c
index ef0e95ebe7..25bd6993bf 100644
--- a/src/mesa/drivers/dri/i965/brw_surface_formats.c
+++ b/src/mesa/drivers/dri/i965/brw_surface_formats.c
@@ -472,9 +472,21 @@ brw_format_for_mesa_format(gl_format mesa_format, blorp_process_format *process_
[MESA_FORMAT_S8_Z24] = {BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
BRW_SURFACEFORMAT_I24X8_UNORM,
0},
+ /* It appears that Z16 is slower than Z24 (on Intel Ivybridge and newer
+ * hardware at least), so there's no real reason to prefer it unless you're
+ * under memory (not memory bandwidth) pressure. Our speculation is that
+ * this is due to either increased fragment shader execution from
+ * GL_LEQUAL/GL_EQUAL depth tests at the reduced precision, or due to
+ * increased depth stalls from a cacheline-based heuristic for detecting
+ * depth stalls.
+ *
+ * However, desktop GL 3.0+ requires that you get exactly 16 bits when
+ * asking for DEPTH_COMPONENT16, so we have to respect that, thus the
+ * desktop_gl_sel flag.
+ */
[MESA_FORMAT_Z16] = {BRW_SURFACEFORMAT_R16_UNORM,
BRW_SURFACEFORMAT_I16_UNORM,
- 0},
+ desktop_gl_sel},
[MESA_FORMAT_X8_Z24] = {BRW_SURFACEFORMAT_B8G8R8A8_UNORM,
BRW_SURFACEFORMAT_I24X8_UNORM,
0},
@@ -940,6 +952,8 @@ brw_init_surface_formats(struct brw_context *brw)
memset(&ctx->TextureFormatSupported, 0, sizeof(ctx->TextureFormatSupported));
+ const unsigned render_not_supported_test =
+ render_not_supported_mask | (_mesa_is_desktop_gl(ctx) ? 0 : desktop_gl_sel);
for (gl_format format = MESA_FORMAT_NONE + 1; format < MESA_FORMAT_COUNT; format++) {
BRW_SURFACE sample_surface, render_surface;
blorp_process_format process_flags;
@@ -960,23 +974,9 @@ brw_init_surface_formats(struct brw_context *brw)
brw_format_for_render(brw, render_surface) &&
(brw_format_for_blend(brw, render_surface) || /* SINTs, UINTs, and YCRCB don't alpha blend. */
is_integer) &&
- 0 == (process_flags & render_not_supported_mask);
+ 0 == (process_flags & render_not_supported_test);
}
- /* It appears that Z16 is slower than Z24 (on Intel Ivybridge and newer
- * hardware at least), so there's no real reason to prefer it unless you're
- * under memory (not memory bandwidth) pressure. Our speculation is that
- * this is due to either increased fragment shader execution from
- * GL_LEQUAL/GL_EQUAL depth tests at the reduced precision, or due to
- * increased depth stalls from a cacheline-based heuristic for detecting
- * depth stalls.
- *
- * However, desktop GL 3.0+ require that you get exactly 16 bits when
- * asking for DEPTH_COMPONENT16, so we have to respect that.
- */
- if (_mesa_is_desktop_gl(ctx))
- ctx->TextureFormatSupported[MESA_FORMAT_Z16] = true;
-
/* On hardware that lacks support for ETC1, we map ETC1 to RGBX
* during glCompressedTexImage2D(). See intel_mipmap_tree::wraps_etc1.
*/