Diffstat (limited to 'src/gallium/drivers/radeon/r600_texture.c')
-rw-r--r--  src/gallium/drivers/radeon/r600_texture.c  225
1 file changed, 63 insertions, 162 deletions
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index e9507c3f54..f7b9740895 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -44,13 +44,13 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
const struct pipe_resource *templ);
-bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
- struct r600_texture *rdst,
- unsigned dst_level, unsigned dstx,
- unsigned dsty, unsigned dstz,
- struct r600_texture *rsrc,
- unsigned src_level,
- const struct pipe_box *src_box)
+bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
+ struct r600_texture *rdst,
+ unsigned dst_level, unsigned dstx,
+ unsigned dsty, unsigned dstz,
+ struct r600_texture *rsrc,
+ unsigned src_level,
+ const struct pipe_box *src_box)
{
if (!rctx->dma.cs)
return false;
@@ -237,7 +237,7 @@ static int r600_init_surface(struct r600_common_screen *rscreen,
is_depth = util_format_has_depth(desc);
is_stencil = util_format_has_stencil(desc);
- if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
+ if (!is_flushed_depth &&
ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
bpe = 4; /* stencil is allocated separately on evergreen */
} else {
@@ -408,10 +408,7 @@ static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
rtex->dirty_level_mask = 0;
- if (rscreen->chip_class >= SI)
- rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
- else
- rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);
+ rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
if (rtex->cmask_buffer != &rtex->resource)
r600_resource_reference(&rtex->cmask_buffer, NULL);
@@ -466,8 +463,8 @@ static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
* \param rctx the current context if you have one, or rscreen->aux_context
* if you don't.
*/
-bool r600_texture_disable_dcc(struct r600_common_context *rctx,
- struct r600_texture *rtex)
+bool si_texture_disable_dcc(struct r600_common_context *rctx,
+ struct r600_texture *rtex)
{
struct r600_common_screen *rscreen = rctx->screen;
@@ -624,7 +621,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
* access.
*/
if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
- if (r600_texture_disable_dcc(rctx, rtex))
+ if (si_texture_disable_dcc(rctx, rtex))
update_metadata = true;
}
@@ -681,7 +678,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
&res->b.b, 0, &box);
/* Move the new buffer storage to the old pipe_resource. */
- r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
+ si_replace_buffer_storage(&rctx->b, &res->b.b, newb);
pipe_resource_reference(&newb, NULL);
assert(res->b.b.bind & PIPE_BIND_SHARED);
@@ -730,10 +727,10 @@ static void r600_texture_destroy(struct pipe_screen *screen,
static const struct u_resource_vtbl r600_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
-void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
- struct r600_texture *rtex,
- unsigned nr_samples,
- struct r600_fmask_info *out)
+void si_texture_get_fmask_info(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex,
+ unsigned nr_samples,
+ struct r600_fmask_info *out)
{
/* FMASK is allocated like an ordinary texture. */
struct pipe_resource templ = rtex->resource.b.b;
@@ -751,17 +748,6 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
templ.nr_samples = 1;
flags = rtex->surface.flags | RADEON_SURF_FMASK;
- if (rscreen->chip_class <= CAYMAN) {
- /* Use the same parameters and tile mode. */
- fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;
- fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;
- fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;
- fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;
-
- if (nr_samples <= 4)
- fmask.u.legacy.bankh = 4;
- }
-
switch (nr_samples) {
case 2:
case 4:
@@ -775,13 +761,6 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
return;
}
- /* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
- * This can be fixed by writing a separate FMASK allocator specifically
- * for R600-R700 asics. */
- if (rscreen->chip_class <= R700) {
- bpe *= 2;
- }
-
if (rscreen->ws->surface_init(rscreen->ws, &templ, flags, bpe,
RADEON_SURF_MODE_2D, &fmask)) {
R600_ERR("Got error in surface_init while allocating FMASK.\n");
@@ -805,47 +784,13 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
struct r600_texture *rtex)
{
- r600_texture_get_fmask_info(rscreen, rtex,
+ si_texture_get_fmask_info(rscreen, rtex,
rtex->resource.b.b.nr_samples, &rtex->fmask);
rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
-void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
- struct r600_texture *rtex,
- struct r600_cmask_info *out)
-{
- unsigned cmask_tile_width = 8;
- unsigned cmask_tile_height = 8;
- unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
- unsigned element_bits = 4;
- unsigned cmask_cache_bits = 1024;
- unsigned num_pipes = rscreen->info.num_tile_pipes;
- unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
-
- unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
- unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
- unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
- unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
- unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;
-
- unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width);
- unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);
-
- unsigned base_align = num_pipes * pipe_interleave_bytes;
- unsigned slice_bytes =
- ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;
-
- assert(macro_tile_width % 128 == 0);
- assert(macro_tile_height % 128 == 0);
-
- out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
- out->alignment = MAX2(256, base_align);
- out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
- align(slice_bytes, base_align);
-}
-
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
struct r600_texture *rtex,
struct r600_cmask_info *out)
@@ -903,19 +848,12 @@ static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
struct r600_texture *rtex)
{
- if (rscreen->chip_class >= SI) {
- si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
- } else {
- r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
- }
+ si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
rtex->size = rtex->cmask.offset + rtex->cmask.size;
- if (rscreen->chip_class >= SI)
- rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
- else
- rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
+ rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
}
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
@@ -926,14 +864,10 @@ static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen
assert(rtex->cmask.size == 0);
- if (rscreen->chip_class >= SI) {
- si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
- } else {
- r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
- }
+ si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
rtex->cmask_buffer = (struct r600_resource *)
- r600_aligned_buffer_create(&rscreen->b,
+ si_aligned_buffer_create(&rscreen->b,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
rtex->cmask.size,
@@ -946,10 +880,7 @@ static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen
/* update colorbuffer state bits */
rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;
- if (rscreen->chip_class >= SI)
- rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
- else
- rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
+ rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
p_atomic_inc(&rscreen->compressed_colortex_counter);
}
@@ -965,16 +896,6 @@ static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
rtex->surface.htile_size = 0;
- if (rscreen->chip_class <= EVERGREEN &&
- rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
- return;
-
- /* HW bug on R6xx. */
- if (rscreen->chip_class == R600 &&
- (rtex->resource.b.b.width0 > 7680 ||
- rtex->resource.b.b.height0 > 7680))
- return;
-
/* HTILE is broken with 1D tiling on old kernels and CIK. */
if (rscreen->chip_class >= CIK &&
rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
@@ -1045,8 +966,8 @@ static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}
-void r600_print_texture_info(struct r600_common_screen *rscreen,
- struct r600_texture *rtex, struct u_log_context *log)
+void si_print_texture_info(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex, struct u_log_context *log)
{
int i;
@@ -1252,21 +1173,12 @@ r600_texture_create_object(struct pipe_screen *screen,
rtex->ps_draw_ratio = 0;
if (rtex->is_depth) {
- if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
- R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
- rscreen->chip_class >= EVERGREEN) {
- if (rscreen->chip_class >= GFX9) {
- rtex->can_sample_z = true;
- rtex->can_sample_s = true;
- } else {
- rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
- rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
- }
+ if (rscreen->chip_class >= GFX9) {
+ rtex->can_sample_z = true;
+ rtex->can_sample_s = true;
} else {
- if (rtex->resource.b.b.nr_samples <= 1 &&
- (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
- rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
- rtex->can_sample_z = true;
+ rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
+ rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
}
if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
@@ -1304,14 +1216,14 @@ r600_texture_create_object(struct pipe_screen *screen,
/* Now create the backing buffer. */
if (!buf) {
- r600_init_resource_fields(rscreen, resource, rtex->size,
+ si_init_resource_fields(rscreen, resource, rtex->size,
rtex->surface.surf_alignment);
/* Displayable surfaces are not suballocated. */
if (resource->b.b.bind & PIPE_BIND_SCANOUT)
resource->flags |= RADEON_FLAG_NO_SUBALLOC;
- if (!r600_alloc_resource(rscreen, resource)) {
+ if (!si_alloc_resource(rscreen, resource)) {
FREE(rtex);
return NULL;
}
@@ -1329,7 +1241,7 @@ r600_texture_create_object(struct pipe_screen *screen,
if (rtex->cmask.size) {
/* Initialize the cmask to 0xCC (= compressed state). */
- r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
+ si_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
rtex->cmask.offset, rtex->cmask.size,
0xCCCCCCCC);
}
@@ -1339,7 +1251,7 @@ r600_texture_create_object(struct pipe_screen *screen,
if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile)
clear_value = 0x0000030F;
- r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(rscreen, &rtex->resource.b.b,
rtex->htile_offset,
rtex->surface.htile_size,
clear_value);
@@ -1347,7 +1259,7 @@ r600_texture_create_object(struct pipe_screen *screen,
/* Initialize DCC only if the texture is not being imported. */
if (!buf && rtex->dcc_offset) {
- r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(rscreen, &rtex->resource.b.b,
rtex->dcc_offset,
rtex->surface.dcc_size,
0xFFFFFFFF);
@@ -1369,7 +1281,7 @@ r600_texture_create_object(struct pipe_screen *screen,
puts("Texture:");
struct u_log_context log;
u_log_context_init(&log);
- r600_print_texture_info(rscreen, rtex, &log);
+ si_print_texture_info(rscreen, rtex, &log);
u_log_new_page_print(&log, stdout);
fflush(stdout);
u_log_context_destroy(&log);
@@ -1403,13 +1315,6 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY))
return RADEON_SURF_MODE_2D;
- /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
- if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
- (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
- (templ->target == PIPE_TEXTURE_2D ||
- templ->target == PIPE_TEXTURE_3D))
- force_tiling = true;
-
/* Handle common candidates for the linear mode.
* Compressed textures and DB surfaces must always be tiled.
*/
@@ -1425,8 +1330,7 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
/* Cursors are linear on SI.
* (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
- if (rscreen->chip_class >= SI &&
- (templ->bind & PIPE_BIND_CURSOR))
+ if (templ->bind & PIPE_BIND_CURSOR)
return RADEON_SURF_MODE_LINEAR_ALIGNED;
if (templ->bind & PIPE_BIND_LINEAR)
@@ -1455,8 +1359,8 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
return RADEON_SURF_MODE_2D;
}
-struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
- const struct pipe_resource *templ)
+struct pipe_resource *si_texture_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
{
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
struct radeon_surf surface = {0};
@@ -1531,9 +1435,9 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
return &rtex->resource.b.b;
}
-bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
- struct pipe_resource *texture,
- struct r600_texture **staging)
+bool si_init_flushed_depth_texture(struct pipe_context *ctx,
+ struct pipe_resource *texture,
+ struct r600_texture **staging)
{
struct r600_texture *rtex = (struct r600_texture*)texture;
struct pipe_resource resource;
@@ -1633,9 +1537,7 @@ static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
unsigned transfer_usage,
const struct pipe_box *box)
{
- /* r600g doesn't react to dirty_tex_descriptor_counter */
- return rscreen->chip_class >= SI &&
- !rtex->resource.b.is_shared &&
+ return !rtex->resource.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
rtex->resource.b.b.last_level == 0 &&
util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
@@ -1654,7 +1556,7 @@ static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
assert(rtex->surface.is_linear);
/* Reallocate the buffer in the same pipe_resource. */
- r600_alloc_resource(rscreen, &rtex->resource);
+ si_alloc_resource(rscreen, &rtex->resource);
/* Initialize the CMASK base address (needed even without CMASK). */
rtex->cmask.base_address_reg =
@@ -1718,7 +1620,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
rtex->resource.domains & RADEON_DOMAIN_VRAM ||
rtex->resource.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
- else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
+ else if (si_rings_is_buffer_referenced(rctx, rtex->resource.buf,
RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rtex->resource.buf, 0,
RADEON_USAGE_READWRITE)) {
@@ -1757,7 +1659,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
- if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
+ if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
R600_ERR("failed to create temporary texture to hold untiled copy\n");
FREE(trans);
return NULL;
@@ -1784,7 +1686,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
} else {
/* XXX: only readback the rectangle which is being mapped? */
/* XXX: when discard is true, no need to read back from depth texture */
- if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
+ if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
R600_ERR("failed to create temporary texture to hold untiled copy\n");
FREE(trans);
return NULL;
@@ -1840,7 +1742,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
buf = &rtex->resource;
}
- if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
+ if (!(map = si_buffer_map_sync_with_rings(rctx, buf, usage))) {
r600_resource_reference(&trans->staging, NULL);
FREE(trans);
return NULL;
@@ -2010,15 +1912,15 @@ void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
if (vi_dcc_enabled(rtex, level) &&
!vi_dcc_formats_compatible(tex->format, view_format))
- if (!r600_texture_disable_dcc(rctx, (struct r600_texture*)tex))
+ if (!si_texture_disable_dcc(rctx, (struct r600_texture*)tex))
rctx->decompress_dcc(&rctx->b, rtex);
}
-struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
- struct pipe_resource *texture,
- const struct pipe_surface *templ,
- unsigned width0, unsigned height0,
- unsigned width, unsigned height)
+struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
+ struct pipe_resource *texture,
+ const struct pipe_surface *templ,
+ unsigned width0, unsigned height0,
+ unsigned width, unsigned height)
{
struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
@@ -2079,7 +1981,7 @@ static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
}
}
- return r600_create_surface_custom(pipe, tex, templ,
+ return si_create_surface_custom(pipe, tex, templ,
width0, height0,
width, height);
}
@@ -2159,7 +2061,7 @@ static void r600_clear_texture(struct pipe_context *pipe,
pipe_surface_reference(&sf, NULL);
}
-unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
+unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
const struct util_format_description *desc = util_format_description(format);
@@ -2380,7 +2282,7 @@ static void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
tex->last_dcc_separate_buffer = NULL;
} else {
tex->dcc_separate_buffer = (struct r600_resource*)
- r600_aligned_buffer_create(rctx->b.screen,
+ si_aligned_buffer_create(rctx->b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
tex->surface.dcc_size,
@@ -2416,7 +2318,7 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
/* Read the results. */
ctx->get_query_result(ctx, rctx->dcc_stats[i].ps_stats[2],
true, &result);
- r600_query_hw_reset_buffers(rctx,
+ si_query_hw_reset_buffers(rctx,
(struct r600_query_hw*)
rctx->dcc_stats[i].ps_stats[2]);
@@ -2527,7 +2429,7 @@ static bool vi_get_fast_clear_parameters(enum pipe_format surface_format,
util_format_is_alpha(surface_format)) {
extra_channel = -1;
} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
- if(r600_translate_colorswap(surface_format, false) <= 1)
+ if(si_translate_colorswap(surface_format, false) <= 1)
extra_channel = desc->nr_channels - 1;
else
extra_channel = 0;
@@ -2725,7 +2627,7 @@ static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
p_atomic_inc(&rscreen->dirty_tex_counter);
}
-void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
+void si_do_fast_color_clear(struct r600_common_context *rctx,
struct pipe_framebuffer_state *fb,
struct r600_atom *fb_state,
unsigned *buffers, ubyte *dirty_cbufs,
@@ -2858,8 +2760,7 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
}
/* We can change the micro tile mode before a full clear. */
- if (rctx->screen->chip_class >= SI)
- si_set_optimal_micro_tile_mode(rctx->screen, tex);
+ si_set_optimal_micro_tile_mode(rctx->screen, tex);
evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);
@@ -2982,7 +2883,7 @@ r600_texture_from_memobj(struct pipe_screen *screen,
return &rtex->resource.b.b;
}
-void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
+void si_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
rscreen->b.resource_from_handle = r600_texture_from_handle;
rscreen->b.resource_get_handle = r600_texture_get_handle;
@@ -2991,7 +2892,7 @@ void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
rscreen->b.memobj_destroy = r600_memobj_destroy;
}
-void r600_init_context_texture_functions(struct r600_common_context *rctx)
+void si_init_context_texture_functions(struct r600_common_context *rctx)
{
rctx->b.create_surface = r600_create_surface;
rctx->b.surface_destroy = r600_surface_destroy;