diff options
-rw-r--r--  src/amd/vulkan/radv_device.c                  | 10
-rw-r--r--  src/amd/vulkan/radv_image.c                   | 26
-rw-r--r--  src/amd/vulkan/radv_private.h                 |  4
-rw-r--r--  src/amd/vulkan/radv_radeon_winsys.h           |  3
-rw-r--r--  src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c | 57
5 files changed, 99 insertions, 1 deletion
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index 402c948e52..584355e391 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -2284,6 +2284,8 @@ VkResult radv_AllocateMemory( VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR); mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd, NULL, NULL); + mem->imported = true; + if (!mem->bo) { result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR; goto fail; @@ -2536,10 +2538,12 @@ VkResult radv_BindBufferMemory( return radv_BindBufferMemory2KHR(device, 1, &info); } -VkResult radv_BindImageMemory2KHR(VkDevice device, +VkResult radv_BindImageMemory2KHR(VkDevice _device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) { + RADV_FROM_HANDLE(radv_device, device, _device); + for (uint32_t i = 0; i < bindInfoCount; ++i) { RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory); RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image); @@ -2547,6 +2551,10 @@ VkResult radv_BindImageMemory2KHR(VkDevice device, if (mem) { image->bo = mem->bo; image->offset = pBindInfos[i].memoryOffset; + + if (image->shareable && mem->imported) + radv_image_update_from_metadata(device, image); + } else { image->bo = NULL; image->offset = 0; diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c index 35c58f45ab..8488e94a96 100644 --- a/src/amd/vulkan/radv_image.c +++ b/src/amd/vulkan/radv_image.c @@ -619,6 +619,32 @@ radv_init_metadata(struct radv_device *device, radv_query_opaque_metadata(device, image, metadata); } +void +radv_image_update_from_metadata(struct radv_device *device, + struct radv_image *image) +{ + struct radeon_bo_metadata metadata; + bool scanout; + + device->ws->buffer_get_metadata(image->bo, &metadata); + + if (device->physical_device->rad_info.chip_class >= GFX9) { + scanout = metadata.u.gfx9.swizzle_mode == 0 || + metadata.u.gfx9.swizzle_mode % 4 == 2; + } else { + scanout = metadata.u.legacy.scanout; + } + + if (scanout != 
!!(image->surface.flags & RADEON_SURF_SCANOUT)) { + if (scanout) + image->surface.flags |= RADEON_SURF_SCANOUT; + else + image->surface.flags &= ~RADEON_SURF_SCANOUT; + + device->ws->surface_init(device->ws, &image->info, &image->surface); + } +} + /* The number of samples can be specified independently of the texture. */ static void radv_image_get_fmask_info(struct radv_device *device, diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h index 5cab407211..dfd12e4da2 100644 --- a/src/amd/vulkan/radv_private.h +++ b/src/amd/vulkan/radv_private.h @@ -564,6 +564,7 @@ struct radv_device_memory { uint32_t type_index; VkDeviceSize map_size; void * map; + bool imported; }; @@ -1309,6 +1310,9 @@ VkResult radv_image_create(VkDevice _device, const VkAllocationCallbacks* alloc, VkImage *pImage); +void radv_image_update_from_metadata(struct radv_device *device, + struct radv_image *image); + void radv_image_view_init(struct radv_image_view *view, struct radv_device *device, const VkImageViewCreateInfo* pCreateInfo); diff --git a/src/amd/vulkan/radv_radeon_winsys.h b/src/amd/vulkan/radv_radeon_winsys.h index 52b55c38e6..b1a9758309 100644 --- a/src/amd/vulkan/radv_radeon_winsys.h +++ b/src/amd/vulkan/radv_radeon_winsys.h @@ -182,6 +182,9 @@ struct radeon_winsys { void (*buffer_unmap)(struct radeon_winsys_bo *bo); + void (*buffer_get_metadata)(struct radeon_winsys_bo *bo, + struct radeon_bo_metadata *md); + void (*buffer_set_metadata)(struct radeon_winsys_bo *bo, struct radeon_bo_metadata *md); diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c index d910aae4ba..1a786a9eab 100644 --- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c +++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c @@ -464,6 +464,21 @@ radv_amdgpu_winsys_get_fd(struct radeon_winsys *_ws, return true; } +static unsigned radv_eg_tile_split(unsigned tile_split) +{ + switch (tile_split) { + case 0: tile_split = 64; break; + case 1: 
tile_split = 128; break; + case 2: tile_split = 256; break; + case 3: tile_split = 512; break; + default: + case 4: tile_split = 1024; break; + case 5: tile_split = 2048; break; + case 6: tile_split = 4096; break; + } + return tile_split; +} + static unsigned radv_eg_tile_split_rev(unsigned eg_tile_split) { switch (eg_tile_split) { @@ -479,6 +494,47 @@ static unsigned radv_eg_tile_split_rev(unsigned eg_tile_split) } static void +radv_amdgpu_winsys_bo_get_metadata(struct radeon_winsys_bo *_bo, + struct radeon_bo_metadata *md) +{ + struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo); + struct amdgpu_bo_info info = {0}; + uint64_t tiling_flags; + int r; + + assert(bo->bo && "must not be called for slab entries"); + + r = amdgpu_bo_query_info(bo->bo, &info); + if (r) + return; + + tiling_flags = info.metadata.tiling_info; + + if (bo->ws->info.chip_class >= GFX9) { + md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); + } else { + md->u.legacy.microtile = RADEON_LAYOUT_LINEAR; + md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR; + + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */ + md->u.legacy.macrotile = RADEON_LAYOUT_TILED; + else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */ + md->u.legacy.microtile = RADEON_LAYOUT_TILED; + + md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + md->u.legacy.tile_split = radv_eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT)); + md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); + md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */ + } + + md->size_metadata = info.metadata.size_metadata; + memcpy(md->metadata, info.metadata.umd_metadata, 
sizeof(md->metadata)); +} + +static void radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys_bo *_bo, struct radeon_bo_metadata *md) { @@ -525,6 +581,7 @@ void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws) ws->base.buffer_unmap = radv_amdgpu_winsys_bo_unmap; ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd; ws->base.buffer_get_fd = radv_amdgpu_winsys_get_fd; + ws->base.buffer_get_metadata = radv_amdgpu_winsys_bo_get_metadata; ws->base.buffer_set_metadata = radv_amdgpu_winsys_bo_set_metadata; ws->base.buffer_virtual_bind = radv_amdgpu_winsys_bo_virtual_bind; } |