author     Samuel Pitoiset <samuel.pitoiset@gmail.com>  2024-04-02 18:06:21 +0200
committer  Marge Bot <emma+marge@anholt.net>  2024-04-04 21:57:45 +0000
commit     0388df3d089a5bf85bd8b9558f56f897e5b13743 (patch)
tree       02b13a430c031c696d696a3633dd22f189601580
parent     67ac6e75c6d31e55ac537f1f4611d9099f3e9402 (diff)
radv: replace RADV_FROM_HANDLE by VK_FROM_HANDLE
It was exactly the same thing.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28568>
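For reference, a minimal sketch of the macro pattern involved (an illustration of the usual Mesa handle-cast convention, not code copied from the tree): VK_FROM_HANDLE declares a driver-struct pointer from a Vulkan handle, and RADV_FROM_HANDLE was a plain forwarding alias, which is why the rename below is purely mechanical.

    /* Illustrative sketch only -- approximates the convention, not the exact definitions. */
    #define VK_FROM_HANDLE(__vk_type, __name, __handle) \
       struct __vk_type *__name = __vk_type##_from_handle(__handle)

    /* The driver-local alias removed by this series simply forwarded: */
    #define RADV_FROM_HANDLE(__radv_type, __name, __handle) \
       VK_FROM_HANDLE(__radv_type, __name, __handle)

    /* Typical call site, as seen throughout the diff below: */
    VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    /* cmd_buffer now has type struct radv_cmd_buffer *. */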
-rw-r--r--  src/amd/vulkan/layers/radv_ctx_roll_layer.c | 6
-rw-r--r--  src/amd/vulkan/layers/radv_metro_exodus.c | 2
-rw-r--r--  src/amd/vulkan/layers/radv_rage2.c | 2
-rw-r--r--  src/amd/vulkan/layers/radv_rmv_layer.c | 8
-rw-r--r--  src/amd/vulkan/layers/radv_rra_layer.c | 32
-rw-r--r--  src/amd/vulkan/layers/radv_sqtt_layer.c | 44
-rw-r--r--  src/amd/vulkan/meta/radv_meta_blit.c | 6
-rw-r--r--  src/amd/vulkan/meta/radv_meta_buffer.c | 14
-rw-r--r--  src/amd/vulkan/meta/radv_meta_clear.c | 10
-rw-r--r--  src/amd/vulkan/meta/radv_meta_copy.c | 18
-rw-r--r--  src/amd/vulkan/meta/radv_meta_resolve.c | 6
-rw-r--r--  src/amd/vulkan/radix_sort/radv_radix_sort.c | 26
-rw-r--r--  src/amd/vulkan/radv_acceleration_structure.c | 52
-rw-r--r--  src/amd/vulkan/radv_android.c | 14
-rw-r--r--  src/amd/vulkan/radv_buffer.c | 18
-rw-r--r--  src/amd/vulkan/radv_buffer_view.c | 8
-rw-r--r--  src/amd/vulkan/radv_cmd_buffer.c | 253
-rw-r--r--  src/amd/vulkan/radv_debug.c | 2
-rw-r--r--  src/amd/vulkan/radv_descriptor_set.c | 81
-rw-r--r--  src/amd/vulkan/radv_device.c | 24
-rw-r--r--  src/amd/vulkan/radv_device_generated_commands.c | 12
-rw-r--r--  src/amd/vulkan/radv_device_memory.c | 16
-rw-r--r--  src/amd/vulkan/radv_event.c | 14
-rw-r--r--  src/amd/vulkan/radv_formats.c | 10
-rw-r--r--  src/amd/vulkan/radv_image.c | 22
-rw-r--r--  src/amd/vulkan/radv_image_view.c | 10
-rw-r--r--  src/amd/vulkan/radv_instance.c | 4
-rw-r--r--  src/amd/vulkan/radv_perfcounter.c | 4
-rw-r--r--  src/amd/vulkan/radv_physical_device.c | 6
-rw-r--r--  src/amd/vulkan/radv_pipeline.c | 16
-rw-r--r--  src/amd/vulkan/radv_pipeline_cache.c | 4
-rw-r--r--  src/amd/vulkan/radv_pipeline_compute.c | 4
-rw-r--r--  src/amd/vulkan/radv_pipeline_graphics.c | 14
-rw-r--r--  src/amd/vulkan/radv_pipeline_rt.c | 22
-rw-r--r--  src/amd/vulkan/radv_private.h | 2
-rw-r--r--  src/amd/vulkan/radv_query.c | 42
-rw-r--r--  src/amd/vulkan/radv_queue.c | 6
-rw-r--r--  src/amd/vulkan/radv_rmv.c | 14
-rw-r--r--  src/amd/vulkan/radv_rra.c | 6
-rw-r--r--  src/amd/vulkan/radv_sampler.c | 6
-rw-r--r--  src/amd/vulkan/radv_shader_object.c | 16
-rw-r--r--  src/amd/vulkan/radv_video.c | 40
-rw-r--r--  src/amd/vulkan/radv_wsi.c | 8
43 files changed, 460 insertions, 464 deletions
diff --git a/src/amd/vulkan/layers/radv_ctx_roll_layer.c b/src/amd/vulkan/layers/radv_ctx_roll_layer.c
index 0da6c66662d..31872468b34 100644
--- a/src/amd/vulkan/layers/radv_ctx_roll_layer.c
+++ b/src/amd/vulkan/layers/radv_ctx_roll_layer.c
@@ -27,7 +27,7 @@
VKAPI_ATTR VkResult VKAPI_CALL
ctx_roll_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
simple_mtx_lock(&device->ctx_roll_mtx);
@@ -45,7 +45,7 @@ ctx_roll_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
VKAPI_ATTR VkResult VKAPI_CALL
ctx_roll_QueueSubmit2(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2 *pSubmits, VkFence _fence)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
simple_mtx_lock(&device->ctx_roll_mtx);
@@ -54,7 +54,7 @@ ctx_roll_QueueSubmit2(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2
for (uint32_t submit_index = 0; submit_index < submitCount; submit_index++) {
const VkSubmitInfo2 *submit = pSubmits + submit_index;
for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) {
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, submit->pCommandBufferInfos[i].commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, submit->pCommandBufferInfos[i].commandBuffer);
fprintf(device->ctx_roll_file, "\n%s:\n", vk_object_base_name(&cmd_buffer->vk.base));
device->ws->cs_dump(cmd_buffer->cs, device->ctx_roll_file, NULL, 0, RADV_CS_DUMP_TYPE_CTX_ROLLS);
}
diff --git a/src/amd/vulkan/layers/radv_metro_exodus.c b/src/amd/vulkan/layers/radv_metro_exodus.c
index 63389b9ce04..0b1920b3728 100644
--- a/src/amd/vulkan/layers/radv_metro_exodus.c
+++ b/src/amd/vulkan/layers/radv_metro_exodus.c
@@ -33,6 +33,6 @@ metro_exodus_GetSemaphoreCounterValue(VkDevice _device, VkSemaphore _semaphore,
return VK_SUCCESS;
}
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return device->layer_dispatch.app.GetSemaphoreCounterValue(_device, _semaphore, pValue);
}
diff --git a/src/amd/vulkan/layers/radv_rage2.c b/src/amd/vulkan/layers/radv_rage2.c
index 851eafbcf91..a9575e9d32f 100644
--- a/src/amd/vulkan/layers/radv_rage2.c
+++ b/src/amd/vulkan/layers/radv_rage2.c
@@ -31,7 +31,7 @@ rage2_CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginI
VkSubpassContents contents)
{
VK_FROM_HANDLE(vk_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
VkRenderPassBeginInfo render_pass_begin = {
diff --git a/src/amd/vulkan/layers/radv_rmv_layer.c b/src/amd/vulkan/layers/radv_rmv_layer.c
index cbe5cd9f82b..a507580923b 100644
--- a/src/amd/vulkan/layers/radv_rmv_layer.c
+++ b/src/amd/vulkan/layers/radv_rmv_layer.c
@@ -29,7 +29,7 @@
VKAPI_ATTR VkResult VKAPI_CALL
rmv_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
VkResult res = device->layer_dispatch.rmv.QueuePresentKHR(_queue, pPresentInfo);
@@ -44,7 +44,7 @@ rmv_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
VKAPI_ATTR VkResult VKAPI_CALL
rmv_FlushMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult res = device->layer_dispatch.rmv.FlushMappedMemoryRanges(_device, memoryRangeCount, pMemoryRanges);
if (res != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)
@@ -58,7 +58,7 @@ rmv_FlushMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount, const V
VKAPI_ATTR VkResult VKAPI_CALL
rmv_InvalidateMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult res = device->layer_dispatch.rmv.InvalidateMappedMemoryRanges(_device, memoryRangeCount, pMemoryRanges);
if (res != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)
@@ -73,7 +73,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
rmv_SetDebugUtilsObjectNameEXT(VkDevice _device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo)
{
assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT);
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult result = device->layer_dispatch.rmv.SetDebugUtilsObjectNameEXT(_device, pNameInfo);
if (result != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)
diff --git a/src/amd/vulkan/layers/radv_rra_layer.c b/src/amd/vulkan/layers/radv_rra_layer.c
index d78a64ed945..08f11377fd0 100644
--- a/src/amd/vulkan/layers/radv_rra_layer.c
+++ b/src/amd/vulkan/layers/radv_rra_layer.c
@@ -32,7 +32,7 @@
VKAPI_ATTR VkResult VKAPI_CALL
rra_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
if (device->rra_trace.triggered) {
@@ -91,7 +91,7 @@ rra_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
static VkResult
rra_init_accel_struct_data_buffer(VkDevice vk_device, struct radv_rra_accel_struct_data *data)
{
- RADV_FROM_HANDLE(radv_device, device, vk_device);
+ VK_FROM_HANDLE(radv_device, device, vk_device);
VkBufferCreateInfo buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = data->size,
@@ -136,8 +136,8 @@ rra_CreateAccelerationStructureKHR(VkDevice _device, const VkAccelerationStructu
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureKHR *pAccelerationStructure)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);
VkResult result = device->layer_dispatch.rra.CreateAccelerationStructureKHR(_device, pCreateInfo, pAllocator,
pAccelerationStructure);
@@ -145,7 +145,7 @@ rra_CreateAccelerationStructureKHR(VkDevice _device, const VkAccelerationStructu
if (result != VK_SUCCESS)
return result;
- RADV_FROM_HANDLE(vk_acceleration_structure, structure, *pAccelerationStructure);
+ VK_FROM_HANDLE(vk_acceleration_structure, structure, *pAccelerationStructure);
simple_mtx_lock(&device->rra_trace.data_mtx);
struct radv_rra_accel_struct_data *data = calloc(1, sizeof(struct radv_rra_accel_struct_data));
@@ -195,7 +195,7 @@ static void
handle_accel_struct_write(VkCommandBuffer commandBuffer, struct vk_acceleration_structure *accel_struct,
struct radv_rra_accel_struct_data *data)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
VkMemoryBarrier2 barrier = {
@@ -246,14 +246,14 @@ rra_CmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t in
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->layer_dispatch.rra.CmdBuildAccelerationStructuresKHR(commandBuffer, infoCount, pInfos, ppBuildRangeInfos);
simple_mtx_lock(&device->rra_trace.data_mtx);
for (uint32_t i = 0; i < infoCount; ++i) {
- RADV_FROM_HANDLE(vk_acceleration_structure, structure, pInfos[i].dstAccelerationStructure);
+ VK_FROM_HANDLE(vk_acceleration_structure, structure, pInfos[i].dstAccelerationStructure);
struct hash_entry *entry = _mesa_hash_table_search(device->rra_trace.accel_structs, structure);
assert(entry);
@@ -267,14 +267,14 @@ rra_CmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t in
VKAPI_ATTR void VKAPI_CALL
rra_CmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR *pInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->layer_dispatch.rra.CmdCopyAccelerationStructureKHR(commandBuffer, pInfo);
simple_mtx_lock(&device->rra_trace.data_mtx);
- RADV_FROM_HANDLE(vk_acceleration_structure, structure, pInfo->dst);
+ VK_FROM_HANDLE(vk_acceleration_structure, structure, pInfo->dst);
struct hash_entry *entry = _mesa_hash_table_search(device->rra_trace.accel_structs, structure);
assert(entry);
@@ -289,14 +289,14 @@ VKAPI_ATTR void VKAPI_CALL
rra_CmdCopyMemoryToAccelerationStructureKHR(VkCommandBuffer commandBuffer,
const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->layer_dispatch.rra.CmdCopyMemoryToAccelerationStructureKHR(commandBuffer, pInfo);
simple_mtx_lock(&device->rra_trace.data_mtx);
- RADV_FROM_HANDLE(vk_acceleration_structure, structure, pInfo->dst);
+ VK_FROM_HANDLE(vk_acceleration_structure, structure, pInfo->dst);
struct hash_entry *entry = _mesa_hash_table_search(device->rra_trace.accel_structs, structure);
assert(entry);
@@ -314,10 +314,10 @@ rra_DestroyAccelerationStructureKHR(VkDevice _device, VkAccelerationStructureKHR
if (!_structure)
return;
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
simple_mtx_lock(&device->rra_trace.data_mtx);
- RADV_FROM_HANDLE(vk_acceleration_structure, structure, _structure);
+ VK_FROM_HANDLE(vk_acceleration_structure, structure, _structure);
struct hash_entry *entry = _mesa_hash_table_search(device->rra_trace.accel_structs, structure);
@@ -337,7 +337,7 @@ rra_DestroyAccelerationStructureKHR(VkDevice _device, VkAccelerationStructureKHR
VKAPI_ATTR VkResult VKAPI_CALL
rra_QueueSubmit2KHR(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2 *pSubmits, VkFence _fence)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
VkResult result = device->layer_dispatch.rra.QueueSubmit2KHR(_queue, submitCount, pSubmits, _fence);
@@ -350,7 +350,7 @@ rra_QueueSubmit2KHR(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2 *p
for (uint32_t submit_index = 0; submit_index < submitCount; submit_index++) {
for (uint32_t i = 0; i < pSubmits[submit_index].commandBufferInfoCount; i++) {
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pSubmits[submit_index].pCommandBufferInfos[i].commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pSubmits[submit_index].pCommandBufferInfos[i].commandBuffer);
uint32_t trace_count =
util_dynarray_num_elements(&cmd_buffer->ray_history, struct radv_rra_ray_history_data *);
if (!trace_count)
diff --git a/src/amd/vulkan/layers/radv_sqtt_layer.c b/src/amd/vulkan/layers/radv_sqtt_layer.c
index 899bea1af60..5eae9c6c9f3 100644
--- a/src/amd/vulkan/layers/radv_sqtt_layer.c
+++ b/src/amd/vulkan/layers/radv_sqtt_layer.c
@@ -672,7 +672,7 @@ radv_describe_queue_semaphore(struct radv_queue *queue, struct vk_semaphore *syn
static void
radv_handle_sqtt(VkQueue _queue)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
const struct radv_physical_device *pdev = radv_device_physical(device);
bool trigger = device->sqtt_triggered;
@@ -728,7 +728,7 @@ radv_handle_sqtt(VkQueue _queue)
VKAPI_ATTR VkResult VKAPI_CALL
sqtt_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
VkResult result;
@@ -748,7 +748,7 @@ sqtt_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
static VkResult
radv_sqtt_wsi_submit(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2 *pSubmits, VkFence _fence)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
VkCommandBufferSubmitInfo *new_cmdbufs = NULL;
struct radeon_winsys_bo *gpu_timestamp_bo;
@@ -815,7 +815,7 @@ fail:
VKAPI_ATTR VkResult VKAPI_CALL
sqtt_QueueSubmit2(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2 *pSubmits, VkFence _fence)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
const bool is_gfx_or_ace = queue->state.qf == RADV_QUEUE_GENERAL || queue->state.qf == RADV_QUEUE_COMPUTE;
VkCommandBufferSubmitInfo *new_cmdbufs = NULL;
@@ -894,7 +894,7 @@ sqtt_QueueSubmit2(VkQueue _queue, uint32_t submitCount, const VkSubmitInfo2 *pSu
.commandBuffer = post_timed_cmdbuf,
};
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBufferInfo->commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBufferInfo->commandBuffer);
radv_describe_queue_submit(queue, cmd_buffer, j, cpu_timestamp, gpu_timestamps_ptr[0], gpu_timestamps_ptr[1]);
}
@@ -923,7 +923,7 @@ fail:
}
#define EVENT_MARKER_BASE(cmd_name, api_name, event_name, ...) \
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); \
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); \
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer); \
radv_write_begin_general_api_marker(cmd_buffer, ApiCmd##api_name); \
cmd_buffer->state.current_event_type = EventCmd##event_name; \
@@ -1176,7 +1176,7 @@ sqtt_CmdDrawMeshTasksIndirectCountEXT(VkCommandBuffer commandBuffer, VkBuffer bu
#undef EVENT_MARKER_BASE
#define API_MARKER_ALIAS(cmd_name, api_name, ...) \
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); \
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); \
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer); \
radv_write_begin_general_api_marker(cmd_buffer, ApiCmd##api_name); \
device->layer_dispatch.rgp.Cmd##cmd_name(__VA_ARGS__); \
@@ -1187,7 +1187,7 @@ sqtt_CmdDrawMeshTasksIndirectCountEXT(VkCommandBuffer commandBuffer, VkBuffer bu
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline _pipeline)
{
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
API_MARKER(BindPipeline, commandBuffer, pipelineBindPoint, _pipeline);
@@ -1339,21 +1339,21 @@ sqtt_CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags fa
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, pMarkerInfo->pMarkerName);
}
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_write_user_event_marker(cmd_buffer, UserEventPop, NULL);
}
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_write_user_event_marker(cmd_buffer, UserEventTrigger, pMarkerInfo->pMarkerName);
}
@@ -1367,7 +1367,7 @@ sqtt_DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInf
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, pLabelInfo->pLabelName);
@@ -1378,7 +1378,7 @@ sqtt_CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtil
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPop, NULL);
@@ -1389,7 +1389,7 @@ sqtt_CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer)
VKAPI_ATTR void VKAPI_CALL
sqtt_CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventTrigger, pLabelInfo->pLabelName);
@@ -1706,7 +1706,7 @@ sqtt_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, ui
const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult result;
result = device->layer_dispatch.rgp.CreateGraphicsPipelines(_device, pipelineCache, count, pCreateInfos, pAllocator,
@@ -1715,7 +1715,7 @@ sqtt_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, ui
return result;
for (unsigned i = 0; i < count; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
if (!pipeline)
continue;
@@ -1748,7 +1748,7 @@ sqtt_CreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uin
const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult result;
result = device->layer_dispatch.rgp.CreateComputePipelines(_device, pipelineCache, count, pCreateInfos, pAllocator,
@@ -1757,7 +1757,7 @@ sqtt_CreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uin
return result;
for (unsigned i = 0; i < count; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
if (!pipeline)
continue;
@@ -1783,7 +1783,7 @@ sqtt_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR defer
const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult result;
result = device->layer_dispatch.rgp.CreateRayTracingPipelinesKHR(_device, deferredOperation, pipelineCache, count,
@@ -1792,7 +1792,7 @@ sqtt_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR defer
return result;
for (unsigned i = 0; i < count; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pPipelines[i]);
if (!pipeline)
continue;
@@ -1819,8 +1819,8 @@ fail:
VKAPI_ATTR void VKAPI_CALL
sqtt_DestroyPipeline(VkDevice _device, VkPipeline _pipeline, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
if (!_pipeline)
return;
diff --git a/src/amd/vulkan/meta/radv_meta_blit.c b/src/amd/vulkan/meta/radv_meta_blit.c
index 36b6b60c2e6..1c3b28cb962 100644
--- a/src/amd/vulkan/meta/radv_meta_blit.c
+++ b/src/amd/vulkan/meta/radv_meta_blit.c
@@ -556,9 +556,9 @@ blit_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *src_image, VkI
VKAPI_ATTR void VKAPI_CALL
radv_CmdBlitImage2(VkCommandBuffer commandBuffer, const VkBlitImageInfo2 *pBlitImageInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_image, src_image, pBlitImageInfo->srcImage);
- RADV_FROM_HANDLE(radv_image, dst_image, pBlitImageInfo->dstImage);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_image, src_image, pBlitImageInfo->srcImage);
+ VK_FROM_HANDLE(radv_image, dst_image, pBlitImageInfo->dstImage);
for (unsigned r = 0; r < pBlitImageInfo->regionCount; r++) {
blit_image(cmd_buffer, src_image, pBlitImageInfo->srcImageLayout, dst_image, pBlitImageInfo->dstImageLayout,
diff --git a/src/amd/vulkan/meta/radv_meta_buffer.c b/src/amd/vulkan/meta/radv_meta_buffer.c
index 8fe14bbe204..efba7c24854 100644
--- a/src/amd/vulkan/meta/radv_meta_buffer.c
+++ b/src/amd/vulkan/meta/radv_meta_buffer.c
@@ -283,8 +283,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize fillSize,
uint32_t data)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
fillSize = vk_buffer_range(&dst_buffer->vk, dstOffset, fillSize) & ~3ull;
@@ -314,9 +314,9 @@ copy_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *src_buffer,
VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCopyBufferInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
- RADV_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
+ VK_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
for (unsigned r = 0; r < pCopyBufferInfo->regionCount; r++) {
copy_buffer(cmd_buffer, src_buffer, dst_buffer, &pCopyBufferInfo->pRegions[r]);
@@ -350,8 +350,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize,
const void *pData)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
uint64_t va = radv_buffer_get_va(dst_buffer->bo);
va += dstOffset + dst_buffer->offset;
diff --git a/src/amd/vulkan/meta/radv_meta_clear.c b/src/amd/vulkan/meta/radv_meta_clear.c
index 5b53e2d7a0e..670a0b1e977 100644
--- a/src/amd/vulkan/meta/radv_meta_clear.c
+++ b/src/amd/vulkan/meta/radv_meta_clear.c
@@ -2177,8 +2177,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image_h, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_image, image, image_h);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_image, image, image_h);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_meta_saved_state saved_state;
bool cs;
@@ -2205,8 +2205,8 @@ radv_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image_h, V
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_image, image, image_h);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_image, image, image_h);
struct radv_meta_saved_state saved_state;
/* Clear commands (except vkCmdClearAttachments) should not be affected by conditional rendering. */
@@ -2223,7 +2223,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment *pAttachments,
uint32_t rectCount, const VkClearRect *pRects)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_meta_saved_state saved_state;
enum radv_cmd_flush_bits pre_flush = 0;
enum radv_cmd_flush_bits post_flush = 0;
diff --git a/src/amd/vulkan/meta/radv_meta_copy.c b/src/amd/vulkan/meta/radv_meta_copy.c
index 3ba0a1e76a0..4c6cdb2c93f 100644
--- a/src/amd/vulkan/meta/radv_meta_copy.c
+++ b/src/amd/vulkan/meta/radv_meta_copy.c
@@ -238,9 +238,9 @@ copy_buffer_to_image(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *buf
VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyBufferToImage2(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2 *pCopyBufferToImageInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer);
- RADV_FROM_HANDLE(radv_image, dst_image, pCopyBufferToImageInfo->dstImage);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferToImageInfo->srcBuffer);
+ VK_FROM_HANDLE(radv_image, dst_image, pCopyBufferToImageInfo->dstImage);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -378,9 +378,9 @@ copy_image_to_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_buffer *buf
VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyImageToBuffer2(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2 *pCopyImageToBufferInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_image, src_image, pCopyImageToBufferInfo->srcImage);
- RADV_FROM_HANDLE(radv_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_image, src_image, pCopyImageToBufferInfo->srcImage);
+ VK_FROM_HANDLE(radv_buffer, dst_buffer, pCopyImageToBufferInfo->dstBuffer);
for (unsigned r = 0; r < pCopyImageToBufferInfo->regionCount; r++) {
copy_image_to_buffer(cmd_buffer, dst_buffer, src_image, pCopyImageToBufferInfo->srcImageLayout,
@@ -617,9 +617,9 @@ copy_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *src_image, VkI
VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2 *pCopyImageInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_image, src_image, pCopyImageInfo->srcImage);
- RADV_FROM_HANDLE(radv_image, dst_image, pCopyImageInfo->dstImage);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_image, src_image, pCopyImageInfo->srcImage);
+ VK_FROM_HANDLE(radv_image, dst_image, pCopyImageInfo->dstImage);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
diff --git a/src/amd/vulkan/meta/radv_meta_resolve.c b/src/amd/vulkan/meta/radv_meta_resolve.c
index 19ceefe2c4a..841a7951b61 100644
--- a/src/amd/vulkan/meta/radv_meta_resolve.c
+++ b/src/amd/vulkan/meta/radv_meta_resolve.c
@@ -503,9 +503,9 @@ resolve_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *src_image,
VKAPI_ATTR void VKAPI_CALL
radv_CmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2 *pResolveImageInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_image, src_image, pResolveImageInfo->srcImage);
- RADV_FROM_HANDLE(radv_image, dst_image, pResolveImageInfo->dstImage);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_image, src_image, pResolveImageInfo->srcImage);
+ VK_FROM_HANDLE(radv_image, dst_image, pResolveImageInfo->dstImage);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
VkImageLayout src_image_layout = pResolveImageInfo->srcImageLayout;
diff --git a/src/amd/vulkan/radix_sort/radv_radix_sort.c b/src/amd/vulkan/radix_sort/radv_radix_sort.c
index d1f0bf07714..3e3d07af779 100644
--- a/src/amd/vulkan/radix_sort/radv_radix_sort.c
+++ b/src/amd/vulkan/radix_sort/radv_radix_sort.c
@@ -100,14 +100,14 @@ VKAPI_ATTR VkResult VKAPI_CALL
vkCreateShaderModule(VkDevice _device, const VkShaderModuleCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return device->vk.dispatch_table.CreateShaderModule(_device, pCreateInfo, pAllocator, pShaderModule);
}
VKAPI_ATTR void VKAPI_CALL
vkDestroyShaderModule(VkDevice _device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
device->vk.dispatch_table.DestroyShaderModule(_device, shaderModule, pAllocator);
}
@@ -115,14 +115,14 @@ VKAPI_ATTR VkResult VKAPI_CALL
vkCreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return device->vk.dispatch_table.CreatePipelineLayout(_device, pCreateInfo, pAllocator, pPipelineLayout);
}
VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineLayout(VkDevice _device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
device->vk.dispatch_table.DestroyPipelineLayout(_device, pipelineLayout, pAllocator);
}
@@ -131,7 +131,7 @@ vkCreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32
const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return device->vk.dispatch_table.CreateComputePipelines(_device, pipelineCache, createInfoCount, pCreateInfos,
pAllocator, pPipelines);
}
@@ -139,7 +139,7 @@ vkCreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32
VKAPI_ATTR void VKAPI_CALL
vkDestroyPipeline(VkDevice _device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
device->vk.dispatch_table.DestroyPipeline(_device, pipeline, pAllocator);
}
@@ -150,7 +150,7 @@ vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStag
const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->vk.dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
@@ -162,7 +162,7 @@ VKAPI_ATTR void VKAPI_CALL
vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
uint32_t offset, uint32_t size, const void *pValues)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->vk.dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
@@ -171,7 +171,7 @@ vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkSha
VKAPI_ATTR void VKAPI_CALL
vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->vk.dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
@@ -180,7 +180,7 @@ vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBin
VKAPI_ATTR void VKAPI_CALL
vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->vk.dispatch_table.CmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ);
@@ -189,7 +189,7 @@ vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t grou
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
vkGetBufferDeviceAddress(VkDevice _device, const VkBufferDeviceAddressInfo *pInfo)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return device->vk.dispatch_table.GetBufferDeviceAddress(_device, pInfo);
}
@@ -197,7 +197,7 @@ VKAPI_ATTR void VKAPI_CALL
vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
uint32_t data)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->vk.dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
@@ -206,7 +206,7 @@ vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize
VKAPI_ATTR void VKAPI_CALL
vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
device->vk.dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
diff --git a/src/amd/vulkan/radv_acceleration_structure.c b/src/amd/vulkan/radv_acceleration_structure.c
index a5b1dd39b08..b051a612490 100644
--- a/src/amd/vulkan/radv_acceleration_structure.c
+++ b/src/amd/vulkan/radv_acceleration_structure.c
@@ -281,7 +281,7 @@ radv_GetAccelerationStructureBuildSizesKHR(VkDevice _device, VkAccelerationStruc
const uint32_t *pMaxPrimitiveCounts,
VkAccelerationStructureBuildSizesInfoKHR *pSizeInfo)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
STATIC_ASSERT(sizeof(struct radv_bvh_triangle_node) == 64);
STATIC_ASSERT(sizeof(struct radv_bvh_aabb_node) == 64);
@@ -733,7 +733,7 @@ build_leaves(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos, struct bvh_state *bvh_states,
bool updateable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "leaves");
@@ -748,7 +748,7 @@ build_leaves(VkCommandBuffer commandBuffer, uint32_t infoCount,
if (bvh_states[i].config.updateable != updateable)
continue;
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
struct leaf_args leaf_consts = {
.ir = pInfos[i].scratchData.deviceAddress + bvh_states[i].scratch.ir_offset,
@@ -782,7 +782,7 @@ morton_generate(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, struct bvh_state *bvh_states,
enum radv_cmd_flush_bits flush_bits)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "morton");
@@ -815,7 +815,7 @@ morton_sort(VkCommandBuffer commandBuffer, uint32_t infoCount,
enum radv_cmd_flush_bits flush_bits)
{
/* Copyright 2019 The Fuchsia Authors. */
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "sort");
@@ -1045,7 +1045,7 @@ lbvh_build_internal(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, struct bvh_state *bvh_states,
enum radv_cmd_flush_bits flush_bits)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "lbvh");
@@ -1103,7 +1103,7 @@ static void
ploc_build_internal(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, struct bvh_state *bvh_states)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "ploc");
@@ -1142,7 +1142,7 @@ static void
encode_nodes(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, struct bvh_state *bvh_states, bool compact)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "encode");
@@ -1157,7 +1157,7 @@ encode_nodes(VkCommandBuffer commandBuffer, uint32_t infoCount,
if (bvh_states[i].config.internal_type == INTERNAL_BUILD_TYPE_UPDATE)
continue;
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
VkGeometryTypeKHR geometry_type = VK_GEOMETRY_TYPE_TRIANGLES_KHR;
@@ -1206,7 +1206,7 @@ init_header(VkCommandBuffer commandBuffer, uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, struct bvh_state *bvh_states,
struct radv_bvh_batch_state *batch_state)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
if (batch_state->any_compact) {
@@ -1219,7 +1219,7 @@ init_header(VkCommandBuffer commandBuffer, uint32_t infoCount,
for (uint32_t i = 0; i < infoCount; ++i) {
if (bvh_states[i].config.internal_type == INTERNAL_BUILD_TYPE_UPDATE)
continue;
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
size_t base = offsetof(struct radv_accel_struct_header, compacted_size);
uint64_t instance_count =
@@ -1277,7 +1277,7 @@ init_geometry_infos(VkCommandBuffer commandBuffer, uint32_t infoCount,
for (uint32_t i = 0; i < infoCount; ++i) {
if (bvh_states[i].config.internal_type == INTERNAL_BUILD_TYPE_UPDATE)
continue;
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, pInfos[i].dstAccelerationStructure);
uint64_t geometry_infos_size = pInfos[i].geometryCount * sizeof(struct radv_accel_struct_geometry_info);
@@ -1305,7 +1305,7 @@ static void
update(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos, struct bvh_state *bvh_states)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
radv_write_user_event_marker(cmd_buffer, UserEventPush, "update");
@@ -1358,7 +1358,7 @@ radv_CmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t i
const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_meta_saved_state saved_state;
@@ -1439,8 +1439,8 @@ radv_CmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t i
VK_FROM_HANDLE(vk_acceleration_structure, src_as, pInfos[i].srcAccelerationStructure);
VK_FROM_HANDLE(vk_acceleration_structure, dst_as, pInfos[i].dstAccelerationStructure);
- RADV_FROM_HANDLE(radv_buffer, src_as_buffer, src_as->buffer);
- RADV_FROM_HANDLE(radv_buffer, dst_as_buffer, dst_as->buffer);
+ VK_FROM_HANDLE(radv_buffer, src_as_buffer, src_as->buffer);
+ VK_FROM_HANDLE(radv_buffer, dst_as_buffer, dst_as->buffer);
/* Copy header/metadata */
radv_copy_buffer(cmd_buffer, src_as_buffer->bo, dst_as_buffer->bo, src_as_buffer->offset + src_as->offset,
@@ -1499,10 +1499,10 @@ radv_CmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t i
VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR *pInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(vk_acceleration_structure, src, pInfo->src);
- RADV_FROM_HANDLE(vk_acceleration_structure, dst, pInfo->dst);
- RADV_FROM_HANDLE(radv_buffer, src_buffer, src->buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(vk_acceleration_structure, src, pInfo->src);
+ VK_FROM_HANDLE(vk_acceleration_structure, dst, pInfo->dst);
+ VK_FROM_HANDLE(radv_buffer, src_buffer, src->buffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_meta_saved_state saved_state;
@@ -1541,7 +1541,7 @@ radv_GetDeviceAccelerationStructureCompatibilityKHR(VkDevice _device,
const VkAccelerationStructureVersionInfoKHR *pVersionInfo,
VkAccelerationStructureCompatibilityKHR *pCompatibility)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
bool compat = memcmp(pVersionInfo->pVersionData, pdev->driver_uuid, VK_UUID_SIZE) == 0 &&
memcmp(pVersionInfo->pVersionData + VK_UUID_SIZE, pdev->cache_uuid, VK_UUID_SIZE) == 0;
@@ -1569,8 +1569,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyMemoryToAccelerationStructureKHR(VkCommandBuffer commandBuffer,
const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(vk_acceleration_structure, dst, pInfo->dst);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(vk_acceleration_structure, dst, pInfo->dst);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_meta_saved_state saved_state;
@@ -1604,9 +1604,9 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyAccelerationStructureToMemoryKHR(VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(vk_acceleration_structure, src, pInfo->src);
- RADV_FROM_HANDLE(radv_buffer, src_buffer, src->buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(vk_acceleration_structure, src, pInfo->src);
+ VK_FROM_HANDLE(radv_buffer, src_buffer, src->buffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_meta_saved_state saved_state;
diff --git a/src/amd/vulkan/radv_android.c b/src/amd/vulkan/radv_android.c
index 7868d22c122..6ae39bfca0d 100644
--- a/src/amd/vulkan/radv_android.c
+++ b/src/amd/vulkan/radv_android.c
@@ -119,7 +119,7 @@ radv_image_from_gralloc(VkDevice device_h, const VkImageCreateInfo *base_info,
VkImage *out_image_h)
{
- RADV_FROM_HANDLE(radv_device, device, device_h);
+ VK_FROM_HANDLE(radv_device, device, device_h);
const struct radv_physical_device *pdev = radv_device_physical(device);
VkImage image_h = VK_NULL_HANDLE;
struct radv_image *image = NULL;
@@ -222,7 +222,7 @@ VkResult
radv_GetSwapchainGrallocUsageANDROID(VkDevice device_h, VkFormat format, VkImageUsageFlags imageUsage,
int *grallocUsage)
{
- RADV_FROM_HANDLE(radv_device, device, device_h);
+ VK_FROM_HANDLE(radv_device, device, device_h);
struct radv_physical_device *pdev = radv_device_physical(device);
VkPhysicalDevice pdev_h = radv_physical_device_to_handle(pdev);
VkResult result;
@@ -303,7 +303,7 @@ radv_GetSwapchainGrallocUsage2ANDROID(VkDevice device_h, VkFormat format, VkImag
/* Before level 26 (Android 8.0/Oreo) the loader uses
* vkGetSwapchainGrallocUsageANDROID. */
#if ANDROID_API_LEVEL >= 26
- RADV_FROM_HANDLE(radv_device, device, device_h);
+ VK_FROM_HANDLE(radv_device, device, device_h);
struct radv_physical_device *pdev = radv_device_physical(device);
VkPhysicalDevice pdev_h = radv_physical_device_to_handle(pdev);
VkResult result;
@@ -413,7 +413,7 @@ static VkResult
get_ahb_buffer_format_properties(VkDevice device_h, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferFormatPropertiesANDROID *pProperties)
{
- RADV_FROM_HANDLE(radv_device, device, device_h);
+ VK_FROM_HANDLE(radv_device, device, device_h);
struct radv_physical_device *pdev = radv_device_physical(device);
/* Get a description of buffer contents . */
@@ -486,7 +486,7 @@ static VkResult
get_ahb_buffer_format_properties2(VkDevice device_h, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferFormatProperties2ANDROID *pProperties)
{
- RADV_FROM_HANDLE(radv_device, device, device_h);
+ VK_FROM_HANDLE(radv_device, device, device_h);
struct radv_physical_device *pdev = radv_device_physical(device);
/* Get a description of buffer contents . */
@@ -559,7 +559,7 @@ VkResult
radv_GetAndroidHardwareBufferPropertiesANDROID(VkDevice device_h, const struct AHardwareBuffer *buffer,
VkAndroidHardwareBufferPropertiesANDROID *pProperties)
{
- RADV_FROM_HANDLE(radv_device, dev, device_h);
+ VK_FROM_HANDLE(radv_device, dev, device_h);
struct radv_physical_device *pdev = radv_device_physical(dev);
VkAndroidHardwareBufferFormatPropertiesANDROID *format_prop =
@@ -597,7 +597,7 @@ VkResult
radv_GetMemoryAndroidHardwareBufferANDROID(VkDevice device_h, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer)
{
- RADV_FROM_HANDLE(radv_device_memory, mem, pInfo->memory);
+ VK_FROM_HANDLE(radv_device_memory, mem, pInfo->memory);
/* This should always be set due to the export handle types being set on
* allocation. */
diff --git a/src/amd/vulkan/radv_buffer.c b/src/amd/vulkan/radv_buffer.c
index bc4b99ba782..b18f0192ace 100644
--- a/src/amd/vulkan/radv_buffer.c
+++ b/src/amd/vulkan/radv_buffer.c
@@ -121,15 +121,15 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateBuffer(VkDevice _device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkBuffer *pBuffer)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return radv_create_buffer(device, pCreateInfo, pAllocator, pBuffer, false);
}
VKAPI_ATTR void VKAPI_CALL
radv_DestroyBuffer(VkDevice _device, VkBuffer _buffer, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
if (!buffer)
return;
@@ -140,13 +140,13 @@ radv_DestroyBuffer(VkDevice _device, VkBuffer _buffer, const VkAllocationCallbac
VKAPI_ATTR VkResult VKAPI_CALL
radv_BindBufferMemory2(VkDevice _device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_instance *instance = radv_physical_device_instance(pdev);
for (uint32_t i = 0; i < bindInfoCount; ++i) {
- RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
- RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
+ VK_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
+ VK_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
VkBindMemoryStatusKHR *status = (void *)vk_find_struct_const(&pBindInfos[i], BIND_MEMORY_STATUS_KHR);
if (status)
@@ -248,7 +248,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetDeviceBufferMemoryRequirements(VkDevice _device, const VkDeviceBufferMemoryRequirements *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const VkBufferUsageFlagBits2KHR usage_flags = radv_get_buffer_usage_flags(pInfo->pCreateInfo);
radv_get_buffer_memory_requirements(device, pInfo->pCreateInfo->size, pInfo->pCreateInfo->flags, usage_flags,
@@ -258,14 +258,14 @@ radv_GetDeviceBufferMemoryRequirements(VkDevice _device, const VkDeviceBufferMem
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
radv_GetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
return radv_buffer_get_va(buffer->bo) + buffer->offset;
}
VKAPI_ATTR uint64_t VKAPI_CALL
radv_GetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
return buffer->bo ? radv_buffer_get_va(buffer->bo) + buffer->offset : 0;
}
diff --git a/src/amd/vulkan/radv_buffer_view.c b/src/amd/vulkan/radv_buffer_view.c
index 3dcf18a9ac6..f5d780d30df 100644
--- a/src/amd/vulkan/radv_buffer_view.c
+++ b/src/amd/vulkan/radv_buffer_view.c
@@ -105,7 +105,7 @@ void
radv_buffer_view_init(struct radv_buffer_view *view, struct radv_device *device,
const VkBufferViewCreateInfo *pCreateInfo)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);
uint64_t va = radv_buffer_get_va(buffer->bo) + buffer->offset;
vk_buffer_view_init(&device->vk, &view->vk, pCreateInfo);
@@ -125,7 +125,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateBufferView(VkDevice _device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_buffer_view *view;
view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -142,8 +142,8 @@ radv_CreateBufferView(VkDevice _device, const VkBufferViewCreateInfo *pCreateInf
VKAPI_ATTR void VKAPI_CALL
radv_DestroyBufferView(VkDevice _device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_buffer_view, view, bufferView);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_buffer_view, view, bufferView);
if (!view)
return;
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 556bfcea423..814b42f66aa 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -6101,7 +6101,7 @@ radv_handle_rendering_image_transition(struct radv_cmd_buffer *cmd_buffer, struc
VKAPI_ATTR VkResult VKAPI_CALL
radv_BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
VkResult result = VK_SUCCESS;
@@ -6234,7 +6234,7 @@ radv_CmdBindVertexBuffers2(VkCommandBuffer commandBuffer, uint32_t firstBinding,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes,
const VkDeviceSize *pStrides)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
@@ -6252,7 +6252,7 @@ radv_CmdBindVertexBuffers2(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t misaligned_mask_invalid = 0;
for (uint32_t i = 0; i < bindingCount; i++) {
- RADV_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
+ VK_FROM_HANDLE(radv_buffer, buffer, pBuffers[i]);
uint32_t idx = firstBinding + i;
VkDeviceSize size = pSizes ? pSizes[i] : 0;
/* if pStrides=NULL, it shouldn't overwrite the strides specified by CmdSetVertexInputEXT */
@@ -6320,8 +6320,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdBindIndexBuffer2KHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size,
VkIndexType indexType)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, index_buffer, buffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -6375,7 +6375,7 @@ static void
radv_bind_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
const VkBindDescriptorSetsInfoKHR *pBindDescriptorSetsInfo, VkPipelineBindPoint bind_point)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, layout, pBindDescriptorSetsInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, layout, pBindDescriptorSetsInfo->layout);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
@@ -6385,7 +6385,7 @@ radv_bind_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
for (unsigned i = 0; i < pBindDescriptorSetsInfo->descriptorSetCount; ++i) {
unsigned set_idx = i + pBindDescriptorSetsInfo->firstSet;
- RADV_FROM_HANDLE(radv_descriptor_set, set, pBindDescriptorSetsInfo->pDescriptorSets[i]);
+ VK_FROM_HANDLE(radv_descriptor_set, set, pBindDescriptorSetsInfo->pDescriptorSets[i]);
if (!set)
continue;
@@ -6433,7 +6433,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdBindDescriptorSets2KHR(VkCommandBuffer commandBuffer,
const VkBindDescriptorSetsInfoKHR *pBindDescriptorSetsInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
if (pBindDescriptorSetsInfo->stageFlags & VK_SHADER_STAGE_COMPUTE_BIT) {
radv_bind_descriptor_sets(cmd_buffer, pBindDescriptorSetsInfo, VK_PIPELINE_BIND_POINT_COMPUTE);
@@ -6488,7 +6488,7 @@ radv_meta_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer, VkPipelineBind
VkPipelineLayout _layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
struct radv_descriptor_set *push_set = (struct radv_descriptor_set *)&cmd_buffer->meta_push_descriptors;
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
unsigned bo_offset;
@@ -6516,7 +6516,7 @@ static void
radv_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer, const VkPushDescriptorSetInfoKHR *pPushDescriptorSetInfo,
VkPipelineBindPoint bind_point)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, layout, pPushDescriptorSetInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, layout, pPushDescriptorSetInfo->layout);
struct radv_descriptor_state *descriptors_state = radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_descriptor_set *push_set = (struct radv_descriptor_set *)&descriptors_state->push_set.set;
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
@@ -6548,7 +6548,7 @@ radv_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer, const VkPushDescrip
VKAPI_ATTR void VKAPI_CALL
radv_CmdPushDescriptorSet2KHR(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfoKHR *pPushDescriptorSetInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
if (pPushDescriptorSetInfo->stageFlags & VK_SHADER_STAGE_COMPUTE_BIT) {
radv_push_descriptor_set(cmd_buffer, pPushDescriptorSetInfo, VK_PIPELINE_BIND_POINT_COMPUTE);
@@ -6567,10 +6567,9 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdPushDescriptorSetWithTemplate2KHR(
VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfoKHR *pPushDescriptorSetWithTemplateInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_pipeline_layout, layout, pPushDescriptorSetWithTemplateInfo->layout);
- RADV_FROM_HANDLE(radv_descriptor_update_template, templ,
- pPushDescriptorSetWithTemplateInfo->descriptorUpdateTemplate);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_pipeline_layout, layout, pPushDescriptorSetWithTemplateInfo->layout);
+ VK_FROM_HANDLE(radv_descriptor_update_template, templ, pPushDescriptorSetWithTemplateInfo->descriptorUpdateTemplate);
struct radv_descriptor_state *descriptors_state = radv_get_descriptors_state(cmd_buffer, templ->bind_point);
struct radv_descriptor_set *push_set = (struct radv_descriptor_set *)&descriptors_state->push_set.set;
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
@@ -6594,7 +6593,7 @@ radv_CmdPushDescriptorSetWithTemplate2KHR(
VKAPI_ATTR void VKAPI_CALL
radv_CmdPushConstants2KHR(VkCommandBuffer commandBuffer, const VkPushConstantsInfoKHR *pPushConstantsInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
memcpy(cmd_buffer->push_constants + pPushConstantsInfo->offset, pPushConstantsInfo->pValues,
pPushConstantsInfo->size);
cmd_buffer->push_constant_stages |= pPushConstantsInfo->stageFlags;
@@ -6603,7 +6602,7 @@ radv_CmdPushConstants2KHR(VkCommandBuffer commandBuffer, const VkPushConstantsIn
VKAPI_ATTR VkResult VKAPI_CALL
radv_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -7076,8 +7075,8 @@ radv_reset_shader_object_state(struct radv_cmd_buffer *cmd_buffer, VkPipelineBin
VKAPI_ATTR void VKAPI_CALL
radv_CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline _pipeline)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -7226,7 +7225,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
ASSERTED const uint32_t total_count = firstViewport + viewportCount;
@@ -7249,7 +7248,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
ASSERTED const uint32_t total_count = firstScissor + scissorCount;
@@ -7267,7 +7266,7 @@ radv_CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.line.width = lineWidth;
@@ -7278,7 +7277,7 @@ radv_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4])
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
memcpy(state->dynamic.vk.cb.blend_constants, blendConstants, sizeof(float) * 4);
@@ -7289,7 +7288,7 @@ radv_CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConsta
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ds.depth.bounds_test.min = minDepthBounds;
@@ -7301,7 +7300,7 @@ radv_CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, floa
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
@@ -7315,7 +7314,7 @@ radv_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
@@ -7329,7 +7328,7 @@ radv_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags fa
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
@@ -7344,7 +7343,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
@@ -7359,7 +7358,7 @@ radv_CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDisc
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
@@ -7376,7 +7375,7 @@ radv_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocat
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetLineStippleKHR(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.line.stipple.factor = lineStippleFactor;
@@ -7388,7 +7387,7 @@ radv_CmdSetLineStippleKHR(VkCommandBuffer commandBuffer, uint32_t lineStippleFac
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetCullMode(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.cull_mode = cullMode;
@@ -7399,7 +7398,7 @@ radv_CmdSetCullMode(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode)
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetFrontFace(VkCommandBuffer commandBuffer, VkFrontFace frontFace)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.front_face = frontFace;
@@ -7410,7 +7409,7 @@ radv_CmdSetFrontFace(VkCommandBuffer commandBuffer, VkFrontFace frontFace)
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetPrimitiveTopology(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
unsigned primitive_topology = radv_translate_prim(primitiveTopology);
@@ -7443,7 +7442,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthTestEnable(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ds.depth.test_enable = depthTestEnable;
@@ -7454,7 +7453,7 @@ radv_CmdSetDepthTestEnable(VkCommandBuffer commandBuffer, VkBool32 depthTestEnab
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthWriteEnable(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ds.depth.write_enable = depthWriteEnable;
@@ -7465,7 +7464,7 @@ radv_CmdSetDepthWriteEnable(VkCommandBuffer commandBuffer, VkBool32 depthWriteEn
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthCompareOp(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ds.depth.compare_op = depthCompareOp;
@@ -7476,7 +7475,7 @@ radv_CmdSetDepthCompareOp(VkCommandBuffer commandBuffer, VkCompareOp depthCompar
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthBoundsTestEnable(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ds.depth.bounds_test.enable = depthBoundsTestEnable;
@@ -7487,7 +7486,7 @@ radv_CmdSetDepthBoundsTestEnable(VkCommandBuffer commandBuffer, VkBool32 depthBo
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetStencilTestEnable(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ds.stencil.test_enable = stencilTestEnable;
@@ -7499,7 +7498,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetStencilOp(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp,
VkStencilOp depthFailOp, VkCompareOp compareOp)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
@@ -7523,7 +7522,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize,
const VkFragmentShadingRateCombinerOpKHR combinerOps[2])
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.fsr.fragment_size = *pFragmentSize;
@@ -7536,7 +7535,7 @@ radv_CmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthBiasEnable(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.depth_bias.enable = depthBiasEnable;
@@ -7547,7 +7546,7 @@ radv_CmdSetDepthBiasEnable(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnab
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetPrimitiveRestartEnable(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ia.primitive_restart_enable = primitiveRestartEnable;
@@ -7558,7 +7557,7 @@ radv_CmdSetPrimitiveRestartEnable(VkCommandBuffer commandBuffer, VkBool32 primit
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetRasterizerDiscardEnable(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.rasterizer_discard_enable = rasterizerDiscardEnable;
@@ -7569,7 +7568,7 @@ radv_CmdSetRasterizerDiscardEnable(VkCommandBuffer commandBuffer, VkBool32 raste
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchControlPoints)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ts.patch_control_points = patchControlPoints;
@@ -7580,7 +7579,7 @@ radv_CmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchCo
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetLogicOpEXT(VkCommandBuffer commandBuffer, VkLogicOp logicOp)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
unsigned logic_op = radv_translate_blend_logic_op(logicOp);
@@ -7593,7 +7592,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetColorWriteEnableEXT(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkBool32 *pColorWriteEnables)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
uint8_t color_write_enable = 0;
@@ -7616,7 +7615,7 @@ radv_CmdSetVertexInputEXT(VkCommandBuffer commandBuffer, uint32_t vertexBindingD
uint32_t vertexAttributeDescriptionCount,
const VkVertexInputAttributeDescription2EXT *pVertexAttributeDescriptions)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_cmd_state *state = &cmd_buffer->state;
@@ -7695,7 +7694,7 @@ radv_CmdSetVertexInputEXT(VkCommandBuffer commandBuffer, uint32_t vertexBindingD
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetPolygonModeEXT(VkCommandBuffer commandBuffer, VkPolygonMode polygonMode)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
unsigned polygon_mode = radv_translate_fill(polygonMode);
@@ -7711,7 +7710,7 @@ radv_CmdSetPolygonModeEXT(VkCommandBuffer commandBuffer, VkPolygonMode polygonMo
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetTessellationDomainOriginEXT(VkCommandBuffer commandBuffer, VkTessellationDomainOrigin domainOrigin)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ts.domain_origin = domainOrigin;
@@ -7722,7 +7721,7 @@ radv_CmdSetTessellationDomainOriginEXT(VkCommandBuffer commandBuffer, VkTessella
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetLogicOpEnableEXT(VkCommandBuffer commandBuffer, VkBool32 logicOpEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.cb.logic_op_enable = logicOpEnable;
@@ -7733,7 +7732,7 @@ radv_CmdSetLogicOpEnableEXT(VkCommandBuffer commandBuffer, VkBool32 logicOpEnabl
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetLineStippleEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stippledLineEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.line.stipple.enable = stippledLineEnable;
@@ -7744,7 +7743,7 @@ radv_CmdSetLineStippleEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stippled
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetAlphaToCoverageEnableEXT(VkCommandBuffer commandBuffer, VkBool32 alphaToCoverageEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ms.alpha_to_coverage_enable = alphaToCoverageEnable;
@@ -7755,7 +7754,7 @@ radv_CmdSetAlphaToCoverageEnableEXT(VkCommandBuffer commandBuffer, VkBool32 alph
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetAlphaToOneEnableEXT(VkCommandBuffer commandBuffer, VkBool32 alphaToOneEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ms.alpha_to_one_enable = alphaToOneEnable;
@@ -7766,7 +7765,7 @@ radv_CmdSetAlphaToOneEnableEXT(VkCommandBuffer commandBuffer, VkBool32 alphaToOn
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetSampleMaskEXT(VkCommandBuffer commandBuffer, VkSampleCountFlagBits samples, const VkSampleMask *pSampleMask)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ms.sample_mask = pSampleMask[0] & 0xffff;
@@ -7777,7 +7776,7 @@ radv_CmdSetSampleMaskEXT(VkCommandBuffer commandBuffer, VkSampleCountFlagBits sa
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthClipEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthClipEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.depth_clip_enable = depthClipEnable;
@@ -7789,7 +7788,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetConservativeRasterizationModeEXT(VkCommandBuffer commandBuffer,
VkConservativeRasterizationModeEXT conservativeRasterizationMode)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.conservative_mode = conservativeRasterizationMode;
@@ -7800,7 +7799,7 @@ radv_CmdSetConservativeRasterizationModeEXT(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthClipNegativeOneToOneEXT(VkCommandBuffer commandBuffer, VkBool32 negativeOneToOne)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.vp.depth_clip_negative_one_to_one = negativeOneToOne;
@@ -7811,7 +7810,7 @@ radv_CmdSetDepthClipNegativeOneToOneEXT(VkCommandBuffer commandBuffer, VkBool32
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetProvokingVertexModeEXT(VkCommandBuffer commandBuffer, VkProvokingVertexModeEXT provokingVertexMode)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.provoking_vertex = provokingVertexMode;
@@ -7822,7 +7821,7 @@ radv_CmdSetProvokingVertexModeEXT(VkCommandBuffer commandBuffer, VkProvokingVert
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthClampEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthClampEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.depth_clamp_enable = depthClampEnable;
@@ -7834,7 +7833,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetColorWriteMaskEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount,
const VkColorComponentFlags *pColorWriteMasks)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_cmd_state *state = &cmd_buffer->state;
@@ -7857,7 +7856,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetColorBlendEnableEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount,
const VkBool32 *pColorBlendEnables)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
assert(firstAttachment + attachmentCount <= MAX_RTS);
@@ -7874,7 +7873,7 @@ radv_CmdSetColorBlendEnableEXT(VkCommandBuffer commandBuffer, uint32_t firstAtta
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetRasterizationSamplesEXT(VkCommandBuffer commandBuffer, VkSampleCountFlagBits rasterizationSamples)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ms.rasterization_samples = rasterizationSamples;
@@ -7885,7 +7884,7 @@ radv_CmdSetRasterizationSamplesEXT(VkCommandBuffer commandBuffer, VkSampleCountF
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetLineRasterizationModeEXT(VkCommandBuffer commandBuffer, VkLineRasterizationModeKHR lineRasterizationMode)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.rs.line.mode = lineRasterizationMode;
@@ -7897,7 +7896,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetColorBlendEquationEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount,
const VkColorBlendEquationEXT *pColorBlendEquations)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
assert(firstAttachment + attachmentCount <= MAX_RTS);
@@ -7918,7 +7917,7 @@ radv_CmdSetColorBlendEquationEXT(VkCommandBuffer commandBuffer, uint32_t firstAt
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetSampleLocationsEnableEXT(VkCommandBuffer commandBuffer, VkBool32 sampleLocationsEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.ms.sample_locations_enable = sampleLocationsEnable;
@@ -7929,7 +7928,7 @@ radv_CmdSetSampleLocationsEnableEXT(VkCommandBuffer commandBuffer, VkBool32 samp
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDiscardRectangleEnableEXT(VkCommandBuffer commandBuffer, VkBool32 discardRectangleEnable)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.dr.enable = discardRectangleEnable;
@@ -7941,7 +7940,7 @@ radv_CmdSetDiscardRectangleEnableEXT(VkCommandBuffer commandBuffer, VkBool32 dis
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDiscardRectangleModeEXT(VkCommandBuffer commandBuffer, VkDiscardRectangleModeEXT discardRectangleMode)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.vk.dr.mode = discardRectangleMode;
@@ -7952,7 +7951,7 @@ radv_CmdSetDiscardRectangleModeEXT(VkCommandBuffer commandBuffer, VkDiscardRecta
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetAttachmentFeedbackLoopEnableEXT(VkCommandBuffer commandBuffer, VkImageAspectFlags aspectMask)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
state->dynamic.feedback_loop_aspects = aspectMask;
@@ -7963,7 +7962,7 @@ radv_CmdSetAttachmentFeedbackLoopEnableEXT(VkCommandBuffer commandBuffer, VkImag
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDepthBias2EXT(VkCommandBuffer commandBuffer, const VkDepthBiasInfoEXT *pDepthBiasInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
const VkDepthBiasRepresentationInfoEXT *dbr_info =
@@ -7981,7 +7980,7 @@ radv_CmdSetDepthBias2EXT(VkCommandBuffer commandBuffer, const VkDepthBiasInfoEXT
VKAPI_ATTR void VKAPI_CALL
radv_CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer *pCmdBuffers)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(primary);
const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -7996,7 +7995,7 @@ radv_CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCou
radv_cp_dma_wait_for_idle(primary);
for (uint32_t i = 0; i < commandBufferCount; i++) {
- RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
+ VK_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
/* Do not launch an IB2 for secondary command buffers that contain
* DRAW_{INDEX}_INDIRECT_{MULTI} on GFX6-7 because it's illegal and hangs the GPU.
@@ -8171,7 +8170,7 @@ attachment_initial_layout(const VkRenderingAttachmentInfo *att)
VKAPI_ATTR void VKAPI_CALL
radv_CmdBeginRendering(VkCommandBuffer commandBuffer, const VkRenderingInfo *pRenderingInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -8398,7 +8397,7 @@ radv_CmdBeginRendering(VkCommandBuffer commandBuffer, const VkRenderingInfo *pRe
VKAPI_ATTR void VKAPI_CALL
radv_CmdEndRendering(VkCommandBuffer commandBuffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_mark_noncoherent_rb(cmd_buffer);
radv_cmd_buffer_resolve_rendering(cmd_buffer);
@@ -10018,7 +10017,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
uint32_t firstInstance)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_draw_info info;
info.count = vertexCount;
@@ -10039,7 +10038,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDrawMultiEXT(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT *pVertexInfo,
uint32_t instanceCount, uint32_t firstInstance, uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_draw_info info;
if (!drawCount)
@@ -10062,7 +10061,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
int32_t vertexOffset, uint32_t firstInstance)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_draw_info info;
info.indexed = true;
@@ -10084,7 +10083,7 @@ radv_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer, uint32_t drawCount,
const VkMultiDrawIndexedInfoEXT *pIndexInfo, uint32_t instanceCount, uint32_t firstInstance,
uint32_t stride, const int32_t *pVertexOffset)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_draw_info info;
if (!drawCount)
@@ -10108,8 +10107,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, uint32_t drawCount,
uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
struct radv_draw_info info;
info.count = drawCount;
@@ -10131,8 +10130,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, uint32_t drawCount,
uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
struct radv_draw_info info;
info.indexed = true;
@@ -10154,9 +10153,9 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset, VkBuffer _countBuffer,
VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
struct radv_draw_info info;
info.count = maxDrawCount;
@@ -10180,9 +10179,9 @@ radv_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer _buffer
VkBuffer _countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
struct radv_draw_info info;
info.indexed = true;
@@ -10204,7 +10203,7 @@ radv_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer _buffer
VKAPI_ATTR void VKAPI_CALL
radv_CmdDrawMeshTasksEXT(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_draw_info info;
@@ -10236,8 +10235,8 @@ radv_CmdDrawMeshTasksIndirectEXT(VkCommandBuffer commandBuffer, VkBuffer _buffer
if (!drawCount)
return;
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_draw_info info;
@@ -10269,9 +10268,9 @@ radv_CmdDrawMeshTasksIndirectCountEXT(VkCommandBuffer commandBuffer, VkBuffer _b
uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_draw_info info;
@@ -10809,7 +10808,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t base_x, uint32_t base_y, uint32_t base_z, uint32_t x,
uint32_t y, uint32_t z)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_dispatch_info info = {0};
info.blocks[0] = x;
@@ -10825,8 +10824,8 @@ radv_CmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t base_x, uint32_t ba
VKAPI_ATTR void VKAPI_CALL
radv_CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer _buffer, VkDeviceSize offset)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
struct radv_dispatch_info info = {0};
info.indirect = buffer->bo;
@@ -11071,7 +11070,7 @@ radv_CmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddress
const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable, uint32_t width,
uint32_t height, uint32_t depth)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
VkTraceRaysIndirectCommand2KHR tables = {
.raygenShaderRecordAddress = pRaygenShaderBindingTable->deviceAddress,
@@ -11101,7 +11100,7 @@ radv_CmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer,
const VkStridedDeviceAddressRegionKHR *pCallableShaderBindingTable,
VkDeviceAddress indirectDeviceAddress)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
assert(device->use_global_bo_list);
@@ -11126,7 +11125,7 @@ radv_CmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL
radv_CmdTraceRaysIndirect2KHR(VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
assert(device->use_global_bo_list);
@@ -11137,7 +11136,7 @@ radv_CmdTraceRaysIndirect2KHR(VkCommandBuffer commandBuffer, VkDeviceAddress ind
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t size)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
cmd_buffer->state.rt_stack_size = size;
}
@@ -11557,7 +11556,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, const VkDependencyInfo *dep_inf
}
for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; i++) {
- RADV_FROM_HANDLE(radv_image, image, dep_info->pImageMemoryBarriers[i].image);
+ VK_FROM_HANDLE(radv_image, image, dep_info->pImageMemoryBarriers[i].image);
src_stage_mask |= dep_info->pImageMemoryBarriers[i].srcStageMask;
src_flush_bits |= radv_src_access_flush(cmd_buffer, dep_info->pImageMemoryBarriers[i].srcAccessMask, image);
@@ -11583,7 +11582,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, const VkDependencyInfo *dep_inf
radv_gang_barrier(cmd_buffer, src_stage_mask, 0);
for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; i++) {
- RADV_FROM_HANDLE(radv_image, image, dep_info->pImageMemoryBarriers[i].image);
+ VK_FROM_HANDLE(radv_image, image, dep_info->pImageMemoryBarriers[i].image);
const struct VkSampleLocationsInfoEXT *sample_locs_info =
vk_find_struct_const(dep_info->pImageMemoryBarriers[i].pNext, SAMPLE_LOCATIONS_INFO_EXT);
@@ -11627,7 +11626,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer, const VkDependencyInfo *dep_inf
VKAPI_ATTR void VKAPI_CALL
radv_CmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
enum rgp_barrier_reason barrier_reason;
if (cmd_buffer->vk.runtime_rp_barrier) {
@@ -11714,8 +11713,8 @@ write_event(struct radv_cmd_buffer *cmd_buffer, struct radv_event *event, VkPipe
VKAPI_ATTR void VKAPI_CALL
radv_CmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent _event, const VkDependencyInfo *pDependencyInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_event, event, _event);
VkPipelineStageFlags2 src_stage_mask = 0;
for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
@@ -11731,8 +11730,8 @@ radv_CmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent _event, const VkDepende
VKAPI_ATTR void VKAPI_CALL
radv_CmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent _event, VkPipelineStageFlags2 stageMask)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_event, event, _event);
write_event(cmd_buffer, event, stageMask, 0);
}
@@ -11741,7 +11740,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
const VkDependencyInfo *pDependencyInfos)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radeon_cmdbuf *cs = cmd_buffer->cs;
@@ -11749,7 +11748,7 @@ radv_CmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const Vk
return;
for (unsigned i = 0; i < eventCount; ++i) {
- RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
+ VK_FROM_HANDLE(radv_event, event, pEvents[i]);
uint64_t va = radv_buffer_get_va(event->bo);
radv_cs_add_buffer(device->ws, cs, event->bo);
@@ -11892,8 +11891,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
bool draw_visible = true;
uint64_t va;
@@ -11914,7 +11913,7 @@ radv_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
VKAPI_ATTR void VKAPI_CALL
radv_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
radv_end_conditional_rendering(cmd_buffer);
}
@@ -11925,7 +11924,7 @@ radv_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets,
const VkDeviceSize *pSizes)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
uint8_t enabled_mask = 0;
@@ -12024,7 +12023,7 @@ radv_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstC
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_streamout_binding *sb = cmd_buffer->streamout_bindings;
@@ -12046,7 +12045,7 @@ radv_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstC
uint64_t va = 0;
if (append) {
- RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+ VK_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
uint64_t counter_buffer_offset = 0;
if (pCounterBufferOffsets)
@@ -12113,7 +12112,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount,
const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_streamout_state *so = &cmd_buffer->state.streamout;
@@ -12140,7 +12139,7 @@ radv_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCou
uint64_t va = 0;
if (append) {
- RADV_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
+ VK_FROM_HANDLE(radv_buffer, buffer, pCounterBuffers[counter_buffer_idx]);
uint64_t counter_buffer_offset = 0;
if (pCounterBufferOffsets)
@@ -12232,8 +12231,8 @@ radv_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer, uint32_t instanc
VkBuffer _counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset,
uint32_t vertexStride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
struct radv_draw_info info;
info.count = 0;
@@ -12258,8 +12257,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkBuffer dstBuffer,
VkDeviceSize dstOffset, uint32_t marker)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_buffer, buffer, dstBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, dstBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radeon_cmdbuf *cs = cmd_buffer->cs;
@@ -12306,8 +12305,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdUpdatePipelineIndirectBufferNV(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_compute_pipeline *compute_pipeline = radv_pipeline_to_compute(pipeline);
const uint64_t va = compute_pipeline->indirect.va;
@@ -12324,7 +12323,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdBindDescriptorBuffersEXT(VkCommandBuffer commandBuffer, uint32_t bufferCount,
const VkDescriptorBufferBindingInfoEXT *pBindingInfos)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
for (uint32_t i = 0; i < bufferCount; i++) {
cmd_buffer->descriptor_buffers[i] = pBindingInfos[i].address;
@@ -12353,7 +12352,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdSetDescriptorBufferOffsets2EXT(VkCommandBuffer commandBuffer,
const VkSetDescriptorBufferOffsetsInfoEXT *pSetDescriptorBufferOffsetsInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
if (pSetDescriptorBufferOffsetsInfo->stageFlags & VK_SHADER_STAGE_COMPUTE_BIT) {
radv_set_descriptor_buffer_offsets(cmd_buffer, pSetDescriptorBufferOffsetsInfo, VK_PIPELINE_BIND_POINT_COMPUTE);
@@ -12465,7 +12464,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdBindShadersEXT(VkCommandBuffer commandBuffer, uint32_t stageCount, const VkShaderStageFlagBits *pStages,
const VkShaderEXT *pShaders)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
VkShaderStageFlagBits bound_stages = 0;
for (uint32_t i = 0; i < stageCount; i++) {
@@ -12476,7 +12475,7 @@ radv_CmdBindShadersEXT(VkCommandBuffer commandBuffer, uint32_t stageCount, const
continue;
}
- RADV_FROM_HANDLE(radv_shader_object, shader_obj, pShaders[i]);
+ VK_FROM_HANDLE(radv_shader_object, shader_obj, pShaders[i]);
cmd_buffer->state.shader_objs[stage] = shader_obj;
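
As a concrete example of what the new spelling resolves to in the command-buffer hunks above, the first statement of radv_CmdDraw becomes, after preprocessing, roughly the following (a sketch assuming the radv_cmd_buffer_from_handle helper generated by VK_DEFINE_HANDLE_CASTS, not literal compiler output):

    /* VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer) yields a
     * typed local obtained by casting the dispatchable VkCommandBuffer handle
     * back to the driver's own radv_cmd_buffer structure. */
    struct radv_cmd_buffer *cmd_buffer = radv_cmd_buffer_from_handle(commandBuffer);
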
diff --git a/src/amd/vulkan/radv_debug.c b/src/amd/vulkan/radv_debug.c
index 07f4bc6b346..667cedc70bc 100644
--- a/src/amd/vulkan/radv_debug.c
+++ b/src/amd/vulkan/radv_debug.c
@@ -1101,7 +1101,7 @@ radv_GetDeviceFaultInfoEXT(VkDevice _device, VkDeviceFaultCountsEXT *pFaultCount
VK_OUTARRAY_MAKE_TYPED(VkDeviceFaultAddressInfoEXT, out, pFaultInfo ? pFaultInfo->pAddressInfos : NULL,
&pFaultCounts->addressInfoCount);
struct radv_winsys_gpuvm_fault_info fault_info = {0};
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
bool vm_fault_occurred = false;
diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c
index 1ba18543869..524e5440cc2 100644
--- a/src/amd/vulkan/radv_descriptor_set.c
+++ b/src/amd/vulkan/radv_descriptor_set.c
@@ -137,7 +137,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
@@ -578,7 +578,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_pipeline_layout *layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
@@ -592,7 +592,7 @@ radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pC
layout->num_sets = pCreateInfo->setLayoutCount;
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
- RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[set]);
+ VK_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[set]);
if (set_layout == NULL) {
layout->set[set].layout = NULL;
@@ -621,8 +621,8 @@ radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pC
VKAPI_ATTR void VKAPI_CALL
radv_DestroyPipelineLayout(VkDevice _device, VkPipelineLayout _pipelineLayout, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);
if (!pipeline_layout)
return;
@@ -954,15 +954,15 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDescriptorPool(VkDevice _device, const VkDescriptorPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return radv_create_descriptor_pool(device, pCreateInfo, pAllocator, pDescriptorPool);
}
VKAPI_ATTR void VKAPI_CALL
radv_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_descriptor_pool, pool, _pool);
if (!pool)
return;
@@ -973,8 +973,8 @@ radv_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool, const VkAll
VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetDescriptorPool(VkDevice _device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
if (!pool->host_memory_base) {
for (uint32_t i = 0; i < pool->entry_count; ++i) {
@@ -999,8 +999,8 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_AllocateDescriptorSets(VkDevice _device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result = VK_SUCCESS;
uint32_t i;
@@ -1012,7 +1012,7 @@ radv_AllocateDescriptorSets(VkDevice _device, const VkDescriptorSetAllocateInfo
/* allocate a set of buffers for each shader to contain descriptors */
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
- RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, pAllocateInfo->pSetLayouts[i]);
+ VK_FROM_HANDLE(radv_descriptor_set_layout, layout, pAllocateInfo->pSetLayouts[i]);
const uint32_t *variable_count = NULL;
if (layout->has_variable_descriptors && variable_counts) {
@@ -1044,11 +1044,11 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_FreeDescriptorSets(VkDevice _device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
for (uint32_t i = 0; i < count; i++) {
- RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
+ VK_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
if (set && !pool->host_memory_base)
radv_descriptor_set_destroy(device, pool, set, true);
@@ -1060,7 +1060,7 @@ static ALWAYS_INLINE void
write_texel_buffer_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, unsigned *dst,
struct radeon_winsys_bo **buffer_list, const VkBufferView _buffer_view)
{
- RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);
+ VK_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);
if (!buffer_view) {
memset(dst, 0, 4 * 4);
@@ -1117,7 +1117,7 @@ static ALWAYS_INLINE void
write_buffer_descriptor_impl(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, unsigned *dst,
struct radeon_winsys_bo **buffer_list, const VkDescriptorBufferInfo *buffer_info)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
uint64_t va = 0, range = 0;
if (buffer) {
@@ -1158,7 +1158,7 @@ static ALWAYS_INLINE void
write_dynamic_buffer_descriptor(struct radv_device *device, struct radv_descriptor_range *range,
struct radeon_winsys_bo **buffer_list, const VkDescriptorBufferInfo *buffer_info)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
uint64_t va;
unsigned size;
@@ -1216,7 +1216,7 @@ write_image_descriptor_impl(struct radv_device *device, struct radv_cmd_buffer *
unsigned *dst, struct radeon_winsys_bo **buffer_list, VkDescriptorType descriptor_type,
const VkDescriptorImageInfo *image_info)
{
- RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
+ VK_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
write_image_descriptor(dst, size, descriptor_type, image_info);
@@ -1250,7 +1250,7 @@ write_combined_image_sampler_descriptor(struct radv_device *device, struct radv_
write_image_descriptor_impl(device, cmd_buffer, sampler_offset, dst, buffer_list, descriptor_type, image_info);
/* copy over sampler state */
if (has_sampler) {
- RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
+ VK_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
memcpy(dst + sampler_offset / sizeof(*dst), sampler->state, 16);
}
}
@@ -1258,7 +1258,7 @@ write_combined_image_sampler_descriptor(struct radv_device *device, struct radv_
static ALWAYS_INLINE void
write_sampler_descriptor(unsigned *dst, VkSampler _sampler)
{
- RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
+ VK_FROM_HANDLE(radv_sampler, sampler, _sampler);
memcpy(dst, sampler->state, 16);
}
@@ -1266,8 +1266,7 @@ static ALWAYS_INLINE void
write_accel_struct(struct radv_device *device, void *ptr, VkDeviceAddress va)
{
if (!va) {
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct,
- device->meta_state.accel_struct_build.null.accel_struct);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, device->meta_state.accel_struct_build.null.accel_struct);
va = vk_acceleration_structure_get_va(accel_struct);
}
@@ -1283,7 +1282,7 @@ radv_update_descriptor_sets_impl(struct radv_device *device, struct radv_cmd_buf
uint32_t i, j;
for (i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
- RADV_FROM_HANDLE(radv_descriptor_set, set, dstSetOverride ? dstSetOverride : writeset->dstSet);
+ VK_FROM_HANDLE(radv_descriptor_set, set, dstSetOverride ? dstSetOverride : writeset->dstSet);
const struct radv_descriptor_set_binding_layout *binding_layout =
set->header.layout->binding + writeset->dstBinding;
uint32_t *ptr = set->header.mapped_ptr;
@@ -1358,7 +1357,7 @@ radv_update_descriptor_sets_impl(struct radv_device *device, struct radv_cmd_buf
}
break;
case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, accel_structs->pAccelerationStructures[j]);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, accel_structs->pAccelerationStructures[j]);
write_accel_struct(device, ptr, accel_struct ? vk_acceleration_structure_get_va(accel_struct) : 0);
break;
@@ -1373,8 +1372,8 @@ radv_update_descriptor_sets_impl(struct radv_device *device, struct radv_cmd_buf
for (i = 0; i < descriptorCopyCount; i++) {
const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
- RADV_FROM_HANDLE(radv_descriptor_set, src_set, copyset->srcSet);
- RADV_FROM_HANDLE(radv_descriptor_set, dst_set, copyset->dstSet);
+ VK_FROM_HANDLE(radv_descriptor_set, src_set, copyset->srcSet);
+ VK_FROM_HANDLE(radv_descriptor_set, dst_set, copyset->dstSet);
const struct radv_descriptor_set_binding_layout *src_binding_layout =
src_set->header.layout->binding + copyset->srcBinding;
const struct radv_descriptor_set_binding_layout *dst_binding_layout =
@@ -1449,7 +1448,7 @@ radv_UpdateDescriptorSets(VkDevice _device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
radv_update_descriptor_sets_impl(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
descriptorCopyCount, pDescriptorCopies);
@@ -1472,7 +1471,7 @@ radv_CreateDescriptorUpdateTemplate(VkDevice _device, const VkDescriptorUpdateTe
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
const size_t size = sizeof(struct radv_descriptor_update_template) +
sizeof(struct radv_descriptor_update_template_entry) * entry_count;
@@ -1489,7 +1488,7 @@ radv_CreateDescriptorUpdateTemplate(VkDevice _device, const VkDescriptorUpdateTe
templ->entry_count = entry_count;
if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
/* descriptorSetLayout should be ignored for push descriptors
* and instead it refers to pipelineLayout and set.
@@ -1564,8 +1563,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_DestroyDescriptorUpdateTemplate(VkDevice _device, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
if (!templ)
return;
@@ -1579,7 +1578,7 @@ radv_update_descriptor_set_with_template_impl(struct radv_device *device, struct
struct radv_descriptor_set *set,
VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
{
- RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
+ VK_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
uint32_t i;
for (i = 0; i < templ->entry_count; ++i) {
@@ -1636,7 +1635,7 @@ radv_update_descriptor_set_with_template_impl(struct radv_device *device, struct
memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
break;
case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, *(const VkAccelerationStructureKHR *)pSrc);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, *(const VkAccelerationStructureKHR *)pSrc);
write_accel_struct(device, pDst, accel_struct ? vk_acceleration_structure_get_va(accel_struct) : 0);
break;
}
@@ -1664,8 +1663,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_UpdateDescriptorSetWithTemplate(VkDevice _device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
radv_update_descriptor_set_with_template_impl(device, NULL, set, descriptorUpdateTemplate, pData);
}
@@ -1687,7 +1686,7 @@ radv_GetDescriptorSetLayoutHostMappingInfoVALVE(VkDevice _device,
VKAPI_ATTR void VKAPI_CALL
radv_GetDescriptorSetHostMappingVALVE(VkDevice _device, VkDescriptorSet descriptorSet, void **ppData)
{
- RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
+ VK_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
*ppData = set->header.mapped_ptr;
}
@@ -1695,7 +1694,7 @@ radv_GetDescriptorSetHostMappingVALVE(VkDevice _device, VkDescriptorSet descript
VKAPI_ATTR void VKAPI_CALL
radv_GetDescriptorSetLayoutSizeEXT(VkDevice device, VkDescriptorSetLayout layout, VkDeviceSize *pLayoutSizeInBytes)
{
- RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, layout);
+ VK_FROM_HANDLE(radv_descriptor_set_layout, set_layout, layout);
*pLayoutSizeInBytes = set_layout->size;
}
@@ -1703,7 +1702,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetDescriptorSetLayoutBindingOffsetEXT(VkDevice device, VkDescriptorSetLayout layout, uint32_t binding,
VkDeviceSize *pOffset)
{
- RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, layout);
+ VK_FROM_HANDLE(radv_descriptor_set_layout, set_layout, layout);
*pOffset = set_layout->binding[binding].offset;
}
@@ -1711,7 +1710,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetDescriptorEXT(VkDevice _device, const VkDescriptorGetInfoEXT *pDescriptorInfo, size_t dataSize,
void *pDescriptor)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
switch (pDescriptorInfo->type) {
case VK_DESCRIPTOR_TYPE_SAMPLER: {
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 20f8391fec9..5e55b1c9769 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -100,7 +100,7 @@ radv_GetMemoryHostPointerPropertiesEXT(VkDevice _device, VkExternalMemoryHandleT
const void *pHostPointer,
VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
switch (handleType) {
@@ -607,7 +607,7 @@ init_dispatch_tables(struct radv_device *device, struct radv_physical_device *pd
static VkResult
capture_trace(VkQueue _queue)
{
- RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ VK_FROM_HANDLE(radv_queue, queue, _queue);
struct radv_device *device = radv_queue_device(queue);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
@@ -853,7 +853,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
struct radv_instance *instance = radv_physical_device_instance(pdev);
VkResult result;
struct radv_device *device;
@@ -1283,7 +1283,7 @@ fail_queue:
VKAPI_ATTR void VKAPI_CALL
radv_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
if (!device)
return;
@@ -1378,8 +1378,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetImageMemoryRequirements2(VkDevice _device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_image, image, pInfo->image);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_image, image, pInfo->image);
const struct radv_physical_device *pdev = radv_device_physical(device);
pMemoryRequirements->memoryRequirements.memoryTypeBits =
@@ -2035,8 +2035,8 @@ radv_gfx11_set_db_render_control(const struct radv_device *device, unsigned num_
VKAPI_ATTR VkResult VKAPI_CALL
radv_GetMemoryFdKHR(VkDevice _device, const VkMemoryGetFdInfoKHR *pGetFdInfo, int *pFD)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
@@ -2100,7 +2100,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetMemoryFdPropertiesKHR(VkDevice _device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_physical_device *pdev = radv_device_physical(device);
switch (handleType) {
@@ -2131,7 +2131,7 @@ radv_GetCalibratedTimestampsKHR(VkDevice _device, uint32_t timestampCount,
uint64_t *pMaxDeviation)
{
#ifndef _WIN32
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
uint32_t clock_crystal_freq = pdev->info.clock_crystal_freq;
int d;
@@ -2229,7 +2229,7 @@ radv_device_release_performance_counters(struct radv_device *device)
VKAPI_ATTR VkResult VKAPI_CALL
radv_AcquireProfilingLockKHR(VkDevice _device, const VkAcquireProfilingLockInfoKHR *pInfo)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
bool result = radv_device_acquire_performance_counters(device);
return result ? VK_SUCCESS : VK_ERROR_UNKNOWN;
}
@@ -2237,7 +2237,7 @@ radv_AcquireProfilingLockKHR(VkDevice _device, const VkAcquireProfilingLockInfoK
VKAPI_ATTR void VKAPI_CALL
radv_ReleaseProfilingLockKHR(VkDevice _device)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
radv_device_release_performance_counters(device);
}
diff --git a/src/amd/vulkan/radv_device_generated_commands.c b/src/amd/vulkan/radv_device_generated_commands.c
index 63ea3646349..511993e0020 100644
--- a/src/amd/vulkan/radv_device_generated_commands.c
+++ b/src/amd/vulkan/radv_device_generated_commands.c
@@ -1790,7 +1790,7 @@ radv_CreateIndirectCommandsLayoutNV(VkDevice _device, const VkIndirectCommandsLa
const VkAllocationCallbacks *pAllocator,
VkIndirectCommandsLayoutNV *pIndirectCommandsLayout)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_indirect_command_layout *layout;
size_t size = sizeof(*layout) + pCreateInfo->tokenCount * sizeof(VkIndirectCommandsLayoutTokenNV);
@@ -1841,7 +1841,7 @@ radv_CreateIndirectCommandsLayoutNV(VkDevice _device, const VkIndirectCommandsLa
layout->vbo_offsets[pCreateInfo->pTokens[i].vertexBindingUnit] |= DGC_DYNAMIC_STRIDE;
break;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV: {
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pTokens[i].pushconstantPipelineLayout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pTokens[i].pushconstantPipelineLayout);
for (unsigned j = pCreateInfo->pTokens[i].pushconstantOffset / 4, k = 0;
k < pCreateInfo->pTokens[i].pushconstantSize / 4; ++j, ++k) {
layout->push_constant_mask |= 1ull << j;
@@ -1874,7 +1874,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_DestroyIndirectCommandsLayoutNV(VkDevice _device, VkIndirectCommandsLayoutNV indirectCommandsLayout,
const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VK_FROM_HANDLE(radv_indirect_command_layout, layout, indirectCommandsLayout);
if (!layout)
@@ -1889,7 +1889,7 @@ radv_GetGeneratedCommandsMemoryRequirementsNV(VkDevice _device,
const VkGeneratedCommandsMemoryRequirementsInfoNV *pInfo,
VkMemoryRequirements2 *pMemoryRequirements)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
VK_FROM_HANDLE(radv_indirect_command_layout, layout, pInfo->indirectCommandsLayout);
VK_FROM_HANDLE(radv_pipeline, pipeline, pInfo->pipeline);
@@ -2295,7 +2295,7 @@ radv_GetPipelineIndirectMemoryRequirementsNV(VkDevice _device, const VkComputePi
{
VkMemoryRequirements *reqs = &pMemoryRequirements->memoryRequirements;
const uint32_t size = sizeof(struct radv_compute_pipeline_metadata);
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
reqs->memoryTypeBits = ((1u << pdev->memory_properties.memoryTypeCount) - 1u) & ~pdev->memory_types_32bit;
@@ -2306,7 +2306,7 @@ radv_GetPipelineIndirectMemoryRequirementsNV(VkDevice _device, const VkComputePi
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
radv_GetPipelineIndirectDeviceAddressNV(VkDevice device, const VkPipelineIndirectDeviceAddressInfoNV *pInfo)
{
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pInfo->pipeline);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pInfo->pipeline);
return radv_pipeline_to_compute(pipeline)->indirect.va;
}
diff --git a/src/amd/vulkan/radv_device_memory.c b/src/amd/vulkan/radv_device_memory.c
index de683a42031..766a28d53fe 100644
--- a/src/amd/vulkan/radv_device_memory.c
+++ b/src/amd/vulkan/radv_device_memory.c
@@ -284,15 +284,15 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_AllocateMemory(VkDevice _device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMem)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem, false);
}
VKAPI_ATTR void VKAPI_CALL
radv_FreeMemory(VkDevice _device, VkDeviceMemory _mem, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device_memory, mem, _mem);
radv_free_memory(device, pAllocator, mem);
}
@@ -300,8 +300,8 @@ radv_FreeMemory(VkDevice _device, VkDeviceMemory _mem, const VkAllocationCallbac
VKAPI_ATTR VkResult VKAPI_CALL
radv_MapMemory2KHR(VkDevice _device, const VkMemoryMapInfoKHR *pMemoryMapInfo, void **ppData)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_device_memory, mem, pMemoryMapInfo->memory);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device_memory, mem, pMemoryMapInfo->memory);
void *fixed_address = NULL;
bool use_fixed_address = false;
@@ -331,8 +331,8 @@ radv_MapMemory2KHR(VkDevice _device, const VkMemoryMapInfoKHR *pMemoryMapInfo, v
VKAPI_ATTR VkResult VKAPI_CALL
radv_UnmapMemory2KHR(VkDevice _device, const VkMemoryUnmapInfoKHR *pMemoryUnmapInfo)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_device_memory, mem, pMemoryUnmapInfo->memory);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device_memory, mem, pMemoryUnmapInfo->memory);
vk_rmv_log_cpu_map(&device->vk, mem->bo->va, true);
if (mem->user_ptr == NULL)
@@ -356,7 +356,7 @@ radv_InvalidateMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount, c
VKAPI_ATTR uint64_t VKAPI_CALL
radv_GetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
- RADV_FROM_HANDLE(radv_device_memory, mem, pInfo->memory);
+ VK_FROM_HANDLE(radv_device_memory, mem, pInfo->memory);
return radv_buffer_get_va(mem->bo);
}
diff --git a/src/amd/vulkan/radv_event.c b/src/amd/vulkan/radv_event.c
index 580a06b60a3..1f1d652836e 100644
--- a/src/amd/vulkan/radv_event.c
+++ b/src/amd/vulkan/radv_event.c
@@ -89,7 +89,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateEvent(VkDevice _device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkEvent *pEvent)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VkResult result = radv_create_event(device, pCreateInfo, pAllocator, pEvent, false);
if (result != VK_SUCCESS)
return result;
@@ -100,8 +100,8 @@ radv_CreateEvent(VkDevice _device, const VkEventCreateInfo *pCreateInfo, const V
VKAPI_ATTR void VKAPI_CALL
radv_DestroyEvent(VkDevice _device, VkEvent _event, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_event, event, _event);
if (!event)
return;
@@ -112,8 +112,8 @@ radv_DestroyEvent(VkDevice _device, VkEvent _event, const VkAllocationCallbacks
VKAPI_ATTR VkResult VKAPI_CALL
radv_GetEventStatus(VkDevice _device, VkEvent _event)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_event, event, _event);
if (vk_device_is_lost(&device->vk))
return VK_ERROR_DEVICE_LOST;
@@ -126,7 +126,7 @@ radv_GetEventStatus(VkDevice _device, VkEvent _event)
VKAPI_ATTR VkResult VKAPI_CALL
radv_SetEvent(VkDevice _device, VkEvent _event)
{
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_event, event, _event);
*event->map = 1;
return VK_SUCCESS;
@@ -135,7 +135,7 @@ radv_SetEvent(VkDevice _device, VkEvent _event)
VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetEvent(VkDevice _device, VkEvent _event)
{
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_event, event, _event);
*event->map = 0;
return VK_SUCCESS;
diff --git a/src/amd/vulkan/radv_formats.c b/src/amd/vulkan/radv_formats.c
index 1ad780afd50..f1bb808fb7c 100644
--- a/src/amd/vulkan/radv_formats.c
+++ b/src/amd/vulkan/radv_formats.c
@@ -1353,7 +1353,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format,
VkFormatProperties2 *pFormatProperties)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
VkFormatProperties3 format_props;
radv_physical_device_get_format_properties(pdev, format, &format_props);
@@ -1668,7 +1668,7 @@ radv_GetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *base_info,
VkImageFormatProperties2 *base_props)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL;
VkExternalImageFormatProperties *external_props = NULL;
@@ -1841,7 +1841,7 @@ radv_GetPhysicalDeviceSparseImageFormatProperties2(VkPhysicalDevice physicalDevi
uint32_t *pPropertyCount,
VkSparseImageFormatProperties2 *pProperties)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
VkResult result;
if (pFormatInfo->samples > VK_SAMPLE_COUNT_1_BIT) {
@@ -1877,8 +1877,8 @@ radv_GetImageSparseMemoryRequirements2(VkDevice _device, const VkImageSparseMemo
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_image, image, pInfo->image);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_image, image, pInfo->image);
struct radv_physical_device *pdev = radv_device_physical(device);
if (!(image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
index 3a76d623a3d..5a34a198247 100644
--- a/src/amd/vulkan/radv_image.c
+++ b/src/amd/vulkan/radv_image.c
@@ -1220,7 +1220,7 @@ radv_destroy_image(struct radv_device *device, const VkAllocationCallbacks *pAll
radv_bo_destroy(device, &image->vk.base, image->bindings[0].bo);
if (image->owned_memory != VK_NULL_HANDLE) {
- RADV_FROM_HANDLE(radv_device_memory, mem, image->owned_memory);
+ VK_FROM_HANDLE(radv_device_memory, mem, image->owned_memory);
radv_free_memory(device, pAllocator, mem);
}
@@ -1294,7 +1294,7 @@ VkResult
radv_image_create(VkDevice _device, const struct radv_image_create_info *create_info,
const VkAllocationCallbacks *alloc, VkImage *pImage, bool is_internal)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
@@ -1636,7 +1636,7 @@ radv_CreateImage(VkDevice _device, const VkImageCreateInfo *pCreateInfo, const V
/* Ignore swapchain creation info on Android. Since we don't have an implementation in Mesa,
* we're guaranteed to access an Android object incorrectly.
*/
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
const VkImageSwapchainCreateInfoKHR *swapchain_info =
vk_find_struct_const(pCreateInfo->pNext, IMAGE_SWAPCHAIN_CREATE_INFO_KHR);
@@ -1661,8 +1661,8 @@ radv_CreateImage(VkDevice _device, const VkImageCreateInfo *pCreateInfo, const V
VKAPI_ATTR void VKAPI_CALL
radv_DestroyImage(VkDevice _device, VkImage _image, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_image, image, _image);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_image, image, _image);
if (!image)
return;
@@ -1673,13 +1673,13 @@ radv_DestroyImage(VkDevice _device, VkImage _image, const VkAllocationCallbacks
VKAPI_ATTR VkResult VKAPI_CALL
radv_BindImageMemory2(VkDevice _device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_instance *instance = radv_physical_device_instance(pdev);
for (uint32_t i = 0; i < bindInfoCount; ++i) {
- RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
- RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
+ VK_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
+ VK_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
VkBindMemoryStatusKHR *status = (void *)vk_find_struct_const(&pBindInfos[i], BIND_MEMORY_STATUS_KHR);
if (status)
@@ -1755,8 +1755,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetImageSubresourceLayout2KHR(VkDevice _device, VkImage _image, const VkImageSubresource2KHR *pSubresource,
VkSubresourceLayout2KHR *pLayout)
{
- RADV_FROM_HANDLE(radv_image, image, _image);
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_image, image, _image);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
int level = pSubresource->imageSubresource.mipLevel;
int layer = pSubresource->imageSubresource.arrayLayer;
@@ -1835,7 +1835,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetImageDrmFormatModifierPropertiesEXT(VkDevice _device, VkImage _image,
VkImageDrmFormatModifierPropertiesEXT *pProperties)
{
- RADV_FROM_HANDLE(radv_image, image, _image);
+ VK_FROM_HANDLE(radv_image, image, _image);
pProperties->drmFormatModifier = image->planes[0].surface.modifier;
return VK_SUCCESS;
diff --git a/src/amd/vulkan/radv_image_view.c b/src/amd/vulkan/radv_image_view.c
index 70d3cad9150..c62b2ff21c8 100644
--- a/src/amd/vulkan/radv_image_view.c
+++ b/src/amd/vulkan/radv_image_view.c
@@ -745,7 +745,7 @@ radv_image_view_init(struct radv_image_view *iview, struct radv_device *device,
const VkImageViewCreateInfo *pCreateInfo, VkImageCreateFlags img_create_flags,
const struct radv_image_view_extra_create_info *extra_create_info)
{
- RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
+ VK_FROM_HANDLE(radv_image, image, pCreateInfo->image);
const struct radv_physical_device *pdev = radv_device_physical(device);
const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
uint32_t plane_count = 1;
@@ -924,8 +924,8 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateImageView(VkDevice _device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView)
{
- RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_image, image, pCreateInfo->image);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_image_view *view;
view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -943,8 +943,8 @@ radv_CreateImageView(VkDevice _device, const VkImageViewCreateInfo *pCreateInfo,
VKAPI_ATTR void VKAPI_CALL
radv_DestroyImageView(VkDevice _device, VkImageView _iview, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_image_view, iview, _iview);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_image_view, iview, _iview);
if (!iview)
return;
diff --git a/src/amd/vulkan/radv_instance.c b/src/amd/vulkan/radv_instance.c
index 3728bfd534e..6589e068bba 100644
--- a/src/amd/vulkan/radv_instance.c
+++ b/src/amd/vulkan/radv_instance.c
@@ -375,7 +375,7 @@ radv_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationC
VKAPI_ATTR void VKAPI_CALL
radv_DestroyInstance(VkInstance _instance, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_instance, instance, _instance);
+ VK_FROM_HANDLE(radv_instance, instance, _instance);
if (!instance)
return;
@@ -421,7 +421,7 @@ radv_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount, VkLayerPropertie
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
radv_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
- RADV_FROM_HANDLE(vk_instance, instance, _instance);
+ VK_FROM_HANDLE(vk_instance, instance, _instance);
return vk_instance_get_proc_addr(instance, &radv_instance_entrypoints, pName);
}
diff --git a/src/amd/vulkan/radv_perfcounter.c b/src/amd/vulkan/radv_perfcounter.c
index 47ed3c364c0..c8d7ad45fb8 100644
--- a/src/amd/vulkan/radv_perfcounter.c
+++ b/src/amd/vulkan/radv_perfcounter.c
@@ -822,7 +822,7 @@ radv_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t *pCounterCount,
VkPerformanceCounterKHR *pCounters, VkPerformanceCounterDescriptionKHR *pCounterDescriptions)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
if (vk_queue_to_radv(pdev, queueFamilyIndex) != RADV_QUEUE_GENERAL) {
*pCounterCount = 0;
@@ -874,7 +874,7 @@ radv_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR *pPerformanceQueryCreateInfo,
uint32_t *pNumPasses)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
if (pPerformanceQueryCreateInfo->counterIndexCount == 0) {
*pNumPasses = 0;
diff --git a/src/amd/vulkan/radv_physical_device.c b/src/amd/vulkan/radv_physical_device.c
index cbb6e8f6ecc..2ba8e051626 100644
--- a/src/amd/vulkan/radv_physical_device.c
+++ b/src/amd/vulkan/radv_physical_device.c
@@ -2338,7 +2338,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
if (!pQueueFamilyProperties) {
radv_get_physical_device_queue_family_properties(pdev, pCount, NULL);
return;
@@ -2391,7 +2391,7 @@ static void
radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
VkPhysicalDeviceMemoryProperties *memory_properties = &pdev->memory_properties;
@@ -2521,7 +2521,7 @@ VKAPI_ATTR void VKAPI_CALL
radv_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
pMemoryProperties->memoryProperties = pdev->memory_properties;
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 35085656f74..ba7f06510de 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -125,8 +125,8 @@ radv_pipeline_destroy(struct radv_device *device, struct radv_pipeline *pipeline
VKAPI_ATTR void VKAPI_CALL
radv_DestroyPipeline(VkDevice _device, VkPipeline _pipeline, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
if (!_pipeline)
return;
@@ -836,7 +836,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetPipelineExecutablePropertiesKHR(VkDevice _device, const VkPipelineInfoKHR *pPipelineInfo,
uint32_t *pExecutableCount, VkPipelineExecutablePropertiesKHR *pProperties)
{
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
const uint32_t total_count = radv_get_executable_count(pipeline);
if (!pProperties) {
@@ -936,8 +936,8 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetPipelineExecutableStatisticsKHR(VkDevice _device, const VkPipelineExecutableInfoKHR *pExecutableInfo,
uint32_t *pStatisticCount, VkPipelineExecutableStatisticKHR *pStatistics)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
gl_shader_stage stage;
struct radv_shader *shader =
radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);
@@ -1071,8 +1071,8 @@ radv_GetPipelineExecutableInternalRepresentationsKHR(
VkDevice _device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR *pInternalRepresentations)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
const struct radv_physical_device *pdev = radv_device_physical(device);
gl_shader_stage stage;
struct radv_shader *shader =
@@ -1151,7 +1151,7 @@ radv_copy_shader_stage_create_info(struct radv_device *device, uint32_t stageCou
memcpy(new_stages, pStages, size);
for (uint32_t i = 0; i < stageCount; i++) {
- RADV_FROM_HANDLE(vk_shader_module, module, new_stages[i].module);
+ VK_FROM_HANDLE(vk_shader_module, module, new_stages[i].module);
const VkShaderModuleCreateInfo *minfo = vk_find_struct_const(pStages[i].pNext, SHADER_MODULE_CREATE_INFO);
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index 4996134ee66..50c92d6cb8e 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -93,7 +93,7 @@ void
radv_hash_rt_shaders(const struct radv_device *device, unsigned char *hash, const struct radv_ray_tracing_stage *stages,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, const struct radv_ray_tracing_group *groups)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, layout, pCreateInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, layout, pCreateInfo->layout);
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
@@ -118,7 +118,7 @@ radv_hash_rt_shaders(const struct radv_device *device, unsigned char *hash, cons
if (pCreateInfo->pLibraryInfo) {
for (uint32_t i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, lib_pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, lib_pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
struct radv_ray_tracing_pipeline *lib = radv_pipeline_to_ray_tracing(lib_pipeline);
_mesa_sha1_update(&ctx, lib->sha1, SHA1_DIGEST_LENGTH);
}
diff --git a/src/amd/vulkan/radv_pipeline_compute.c b/src/amd/vulkan/radv_pipeline_compute.c
index 39017b44306..010c6f39506 100644
--- a/src/amd/vulkan/radv_pipeline_compute.c
+++ b/src/amd/vulkan/radv_pipeline_compute.c
@@ -287,9 +287,9 @@ VkResult
radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
struct radv_compute_pipeline *pipeline;
VkResult result;
diff --git a/src/amd/vulkan/radv_pipeline_graphics.c b/src/amd/vulkan/radv_pipeline_graphics.c
index c5f6068735e..35ed1061e69 100644
--- a/src/amd/vulkan/radv_pipeline_graphics.c
+++ b/src/amd/vulkan/radv_pipeline_graphics.c
@@ -676,7 +676,7 @@ radv_pipeline_import_graphics_info(struct radv_device *device, struct radv_graph
const VkGraphicsPipelineCreateInfo *pCreateInfo,
VkGraphicsPipelineLibraryFlagBitsEXT lib_flags)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
VkResult result;
/* Mark all states declared dynamic at pipeline creation. */
@@ -2437,7 +2437,7 @@ radv_pipeline_load_retained_shaders(const struct radv_device *device, struct rad
return;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
struct radv_graphics_lib_pipeline *gfx_pipeline_lib = radv_pipeline_to_graphics_lib(pipeline_lib);
radv_pipeline_import_retained_shaders(device, pipeline, gfx_pipeline_lib, stages);
@@ -2849,7 +2849,7 @@ done:
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
if (libs_info) {
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
struct radv_graphics_lib_pipeline *gfx_pipeline_lib = radv_pipeline_to_graphics_lib(pipeline_lib);
if (!gfx_pipeline_lib->base.active_stages)
@@ -4117,7 +4117,7 @@ radv_graphics_pipeline_init(struct radv_graphics_pipeline *pipeline, struct radv
(pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
struct radv_graphics_lib_pipeline *gfx_pipeline_lib = radv_pipeline_to_graphics_lib(pipeline_lib);
assert(pipeline_lib->type == RADV_PIPELINE_GRAPHICS_LIB);
@@ -4219,7 +4219,7 @@ radv_graphics_pipeline_create(VkDevice _device, VkPipelineCache _cache, const Vk
const struct radv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
struct radv_graphics_pipeline *pipeline;
VkResult result;
@@ -4286,7 +4286,7 @@ radv_graphics_lib_pipeline_init(struct radv_graphics_lib_pipeline *pipeline, str
(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
struct radv_graphics_lib_pipeline *gfx_pipeline_lib = radv_pipeline_to_graphics_lib(pipeline_lib);
radv_graphics_pipeline_import_lib(device, &pipeline->base, state, pipeline_layout, gfx_pipeline_lib,
@@ -4319,7 +4319,7 @@ radv_graphics_lib_pipeline_create(VkDevice _device, VkPipelineCache _cache,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
{
VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_graphics_lib_pipeline *pipeline;
VkResult result;
diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index e3295668b87..47e23147e2e 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -100,7 +100,7 @@ radv_generate_rt_shaders_key(const struct radv_device *device, const struct radv
if (pCreateInfo->pLibraryInfo) {
for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline_lib);
/* apply shader robustness from merged shaders */
if (library_pipeline->traversal_storage_robustness2)
@@ -234,7 +234,7 @@ radv_rt_fill_group_info(struct radv_device *device, const struct radv_ray_tracin
if (pCreateInfo->pLibraryInfo) {
unsigned stage_count = pCreateInfo->stageCount;
for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline_lib);
for (unsigned j = 0; j < library_pipeline->group_count; ++j) {
@@ -266,7 +266,7 @@ radv_rt_fill_stage_info(const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, st
if (pCreateInfo->pLibraryInfo) {
for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline);
for (unsigned j = 0; j < library_pipeline->stage_count; ++j) {
if (library_pipeline->stages[j].nir)
@@ -288,7 +288,7 @@ static void
radv_init_rt_stage_hashes(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
struct radv_ray_tracing_stage *stages, const struct radv_shader_stage_key *stage_keys)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
for (uint32_t idx = 0; idx < pCreateInfo->stageCount; idx++) {
gl_shader_stage s = vk_to_mesa_shader_stage(pCreateInfo->pStages[idx].stage);
@@ -309,7 +309,7 @@ radv_create_merged_rt_create_info(const VkRayTracingPipelineCreateInfoKHR *pCrea
if (pCreateInfo->pLibraryInfo) {
for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline);
total_stages += library_pipeline->stage_count;
@@ -574,7 +574,7 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
const struct radv_shader_stage_key *stage_keys, struct radv_ray_tracing_pipeline *pipeline,
struct radv_serialized_shader_arena_block *capture_replay_handles)
{
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
return VK_PIPELINE_COMPILE_REQUIRED;
@@ -853,9 +853,9 @@ static VkResult
radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+ VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
VkResult result;
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
@@ -1005,7 +1005,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline _pipeline, uint32_t firstGroup, uint32_t groupCount,
size_t dataSize, void *pData)
{
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
struct radv_ray_tracing_group *groups = radv_pipeline_to_ray_tracing(pipeline)->groups;
char *data = pData;
@@ -1024,7 +1024,7 @@ VKAPI_ATTR VkDeviceSize VKAPI_CALL
radv_GetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline _pipeline, uint32_t group,
VkShaderGroupShaderKHR groupShader)
{
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
struct radv_ray_tracing_pipeline *rt_pipeline = radv_pipeline_to_ray_tracing(pipeline);
struct radv_ray_tracing_group *rt_group = &rt_pipeline->groups[group];
switch (groupShader) {
@@ -1044,7 +1044,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline _pipeline, uint32_t firstGroup,
uint32_t groupCount, size_t dataSize, void *pData)
{
- RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+ VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
struct radv_ray_tracing_pipeline *rt_pipeline = radv_pipeline_to_ray_tracing(pipeline);
struct radv_rt_capture_replay_handle *data = pData;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index a200b9761b2..bfd0505a470 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -173,8 +173,6 @@ radv_float_to_ufixed(float value, unsigned frac_bits)
return value * (1 << frac_bits);
}
-#define RADV_FROM_HANDLE(__radv_type, __name, __handle) VK_FROM_HANDLE(__radv_type, __name, __handle)
-
#ifdef __cplusplus
}
#endif
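
The removed radv_private.h wrapper above forwarded directly to the common VK_FROM_HANDLE helper, so converted call sites keep identical behaviour; only the macro name changes. A minimal sketch of the call-site pattern, assuming the usual vk_object.h handle casts for radv_device and a hypothetical entrypoint name:

/* Hypothetical example, not taken from the patch.
 * VK_FROM_HANDLE(type, name, handle) expands (roughly) to
 *   struct type *name = type##_from_handle(handle);
 * so "device" below is the driver-private struct behind the VkDevice handle,
 * exactly as RADV_FROM_HANDLE produced before this change. */
VKAPI_ATTR void VKAPI_CALL
radv_ExampleEntrypoint(VkDevice _device)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   (void)device; /* use device->... as any other radv entrypoint does */
}
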
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 02e58a263db..76e20be2141 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -1328,15 +1328,15 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateQueryPool(VkDevice _device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
return radv_create_query_pool(device, pCreateInfo, pAllocator, pQueryPool);
}
VKAPI_ATTR void VKAPI_CALL
radv_DestroyQueryPool(VkDevice _device, VkQueryPool _pool, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_query_pool, pool, _pool);
if (!pool)
return;
@@ -1369,8 +1369,8 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetQueryPoolResults(VkDevice _device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
const struct radv_physical_device *pdev = radv_device_physical(device);
char *data = pData;
VkResult result = VK_SUCCESS;
@@ -1734,9 +1734,9 @@ radv_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPoo
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride,
VkQueryResultFlags flags)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
- RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
@@ -1935,8 +1935,8 @@ query_clear_value(VkQueryType type)
VKAPI_ATTR void VKAPI_CALL
radv_CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
uint32_t value = query_clear_value(pool->vk.query_type);
@@ -1968,8 +1968,8 @@ radv_CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uin
VKAPI_ATTR void VKAPI_CALL
radv_ResetQueryPool(VkDevice _device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
const struct radv_physical_device *pdev = radv_device_physical(device);
uint32_t value = query_clear_value(pool->vk.query_type);
@@ -2520,8 +2520,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(pool->bo);
@@ -2545,8 +2545,8 @@ radv_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPoo
VKAPI_ATTR void VKAPI_CALL
radv_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
uint64_t va = radv_buffer_get_va(pool->bo);
uint64_t avail_va = va + pool->availability_offset + 4 * query;
va += pool->stride * query;
@@ -2599,8 +2599,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_CmdWriteTimestamp2(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool,
uint32_t query)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
@@ -2654,8 +2654,8 @@ radv_CmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t pool_va = radv_buffer_get_va(pool->bo);
@@ -2668,7 +2668,7 @@ radv_CmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
ASSERTED unsigned cdw_max = radeon_check_space(device->ws, cs, 6 * accelerationStructureCount);
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
- RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, pAccelerationStructures[i]);
+ VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, pAccelerationStructures[i]);
uint64_t va = vk_acceleration_structure_get_va(accel_struct);
switch (queryType) {
diff --git a/src/amd/vulkan/radv_queue.c b/src/amd/vulkan/radv_queue.c
index eba6f1fb210..c889adc203f 100644
--- a/src/amd/vulkan/radv_queue.c
+++ b/src/amd/vulkan/radv_queue.c
@@ -65,7 +65,7 @@ radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoKHR *p
static VkResult
radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
VkResult result = VK_SUCCESS;
struct radv_device_memory *mem = NULL;
@@ -113,7 +113,7 @@ radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferM
static VkResult
radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind)
{
- RADV_FROM_HANDLE(radv_image, image, bind->image);
+ VK_FROM_HANDLE(radv_image, image, bind->image);
VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
@@ -134,7 +134,7 @@ radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseI
static VkResult
radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMemoryBindInfo *bind)
{
- RADV_FROM_HANDLE(radv_image, image, bind->image);
+ VK_FROM_HANDLE(radv_image, image, bind->image);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radeon_surf *surface = &image->planes[0].surface;
uint32_t bs = vk_format_get_blocksize(image->vk.format);
diff --git a/src/amd/vulkan/radv_rmv.c b/src/amd/vulkan/radv_rmv.c
index 351b51ce887..26411929f3e 100644
--- a/src/amd/vulkan/radv_rmv.c
+++ b/src/amd/vulkan/radv_rmv.c
@@ -494,7 +494,7 @@ radv_rmv_log_heap_create(struct radv_device *device, VkDeviceMemory heap, bool i
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_device_memory, memory, heap);
+ VK_FROM_HANDLE(radv_device_memory, memory, heap);
/* Do not log zero-sized device memory objects. */
if (!memory->alloc_size)
@@ -567,7 +567,7 @@ radv_rmv_log_buffer_bind(struct radv_device *device, VkBuffer _buffer)
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ VK_FROM_HANDLE(radv_buffer, buffer, _buffer);
simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
log_resource_bind_locked(device, (uint64_t)_buffer, buffer->bo, buffer->offset, buffer->vk.size);
simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
@@ -580,7 +580,7 @@ radv_rmv_log_image_create(struct radv_device *device, const VkImageCreateInfo *c
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_image, image, _image);
+ VK_FROM_HANDLE(radv_image, image, _image);
simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
struct vk_rmv_resource_create_token token = {0};
@@ -617,7 +617,7 @@ radv_rmv_log_image_bind(struct radv_device *device, VkImage _image)
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_image, image, _image);
+ VK_FROM_HANDLE(radv_image, image, _image);
simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
log_resource_bind_locked(device, (uint64_t)_image, image->bindings[0].bo, image->bindings[0].offset, image->size);
simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
@@ -629,7 +629,7 @@ radv_rmv_log_query_pool_create(struct radv_device *device, VkQueryPool _pool)
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
+ VK_FROM_HANDLE(radv_query_pool, pool, _pool);
if (pool->vk.query_type != VK_QUERY_TYPE_OCCLUSION && pool->vk.query_type != VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pool->vk.query_type != VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT)
@@ -776,7 +776,7 @@ radv_rmv_log_descriptor_pool_create(struct radv_device *device, const VkDescript
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);
+ VK_FROM_HANDLE(radv_descriptor_pool, pool, _pool);
if (pool->bo)
vk_rmv_log_cpu_map(&device->vk, pool->bo->va, false);
@@ -918,7 +918,7 @@ radv_rmv_log_event_create(struct radv_device *device, VkEvent _event, VkEventCre
if (!device->vk.memory_trace_data.is_enabled)
return;
- RADV_FROM_HANDLE(radv_event, event, _event);
+ VK_FROM_HANDLE(radv_event, event, _event);
simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
struct vk_rmv_resource_create_token create_token = {0};
diff --git a/src/amd/vulkan/radv_rra.c b/src/amd/vulkan/radv_rra.c
index 289beafa0dc..86b5f3f63ef 100644
--- a/src/amd/vulkan/radv_rra.c
+++ b/src/amd/vulkan/radv_rra.c
@@ -1044,7 +1044,7 @@ struct rra_copy_context {
static VkResult
rra_copy_context_init(struct rra_copy_context *ctx)
{
- RADV_FROM_HANDLE(radv_device, device, ctx->device);
+ VK_FROM_HANDLE(radv_device, device, ctx->device);
if (device->rra_trace.copy_after_build)
return VK_SUCCESS;
@@ -1122,7 +1122,7 @@ fail_pool:
static void
rra_copy_context_finish(struct rra_copy_context *ctx)
{
- RADV_FROM_HANDLE(radv_device, device, ctx->device);
+ VK_FROM_HANDLE(radv_device, device, ctx->device);
if (device->rra_trace.copy_after_build)
return;
@@ -1317,7 +1317,7 @@ static_assert(sizeof(struct rra_ray_history_timestamp_token) == 8,
VkResult
radv_rra_dump_trace(VkQueue vk_queue, char *filename)
{
- RADV_FROM_HANDLE(radv_queue, queue, vk_queue);
+ VK_FROM_HANDLE(radv_queue, queue, vk_queue);
struct radv_device *device = radv_queue_device(queue);
const struct radv_physical_device *pdev = radv_device_physical(device);
VkDevice vk_device = radv_device_to_handle(device);
diff --git a/src/amd/vulkan/radv_sampler.c b/src/amd/vulkan/radv_sampler.c
index fd5e50d8ea2..7ca4fdac9d4 100644
--- a/src/amd/vulkan/radv_sampler.c
+++ b/src/amd/vulkan/radv_sampler.c
@@ -268,7 +268,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateSampler(VkDevice _device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkSampler *pSampler)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_sampler *sampler;
sampler = vk_sampler_create(&device->vk, pCreateInfo, pAllocator, sizeof(*sampler));
@@ -285,8 +285,8 @@ radv_CreateSampler(VkDevice _device, const VkSamplerCreateInfo *pCreateInfo, con
VKAPI_ATTR void VKAPI_CALL
radv_DestroySampler(VkDevice _device, VkSampler _sampler, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_sampler, sampler, _sampler);
if (!sampler)
return;
diff --git a/src/amd/vulkan/radv_shader_object.c b/src/amd/vulkan/radv_shader_object.c
index 1d3d166e901..d035d64a161 100644
--- a/src/amd/vulkan/radv_shader_object.c
+++ b/src/amd/vulkan/radv_shader_object.c
@@ -55,8 +55,8 @@ radv_shader_object_destroy(struct radv_device *device, struct radv_shader_object
VKAPI_ATTR void VKAPI_CALL
radv_DestroyShaderEXT(VkDevice _device, VkShaderEXT shader, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_shader_object, shader_obj, shader);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_shader_object, shader_obj, shader);
if (!shader)
return;
@@ -80,7 +80,7 @@ radv_shader_stage_init(const VkShaderCreateInfoEXT *sinfo, struct radv_shader_st
out_stage->spirv.size = sinfo->codeSize;
for (uint32_t i = 0; i < sinfo->setLayoutCount; i++) {
- RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, sinfo->pSetLayouts[i]);
+ VK_FROM_HANDLE(radv_descriptor_set_layout, set_layout, sinfo->pSetLayouts[i]);
if (set_layout == NULL)
continue;
@@ -249,7 +249,7 @@ radv_get_shader_layout(const VkShaderCreateInfoEXT *pCreateInfo, struct radv_sha
layout->dynamic_offset_count = 0;
for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++) {
- RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[i]);
+ VK_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[i]);
if (set_layout == NULL)
continue;
@@ -385,7 +385,7 @@ static VkResult
radv_shader_object_create(VkDevice _device, const VkShaderCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkShaderEXT *pShader)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_shader_object *shader_obj;
VkResult result;
@@ -410,7 +410,7 @@ static VkResult
radv_shader_object_create_linked(VkDevice _device, uint32_t createInfoCount, const VkShaderCreateInfoEXT *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkShaderEXT *pShaders)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_shader_stage stages[MESA_VULKAN_SHADER_STAGES];
@@ -624,8 +624,8 @@ radv_write_shader_binary(struct blob *blob, const struct radv_shader_binary *bin
VKAPI_ATTR VkResult VKAPI_CALL
radv_GetShaderBinaryDataEXT(VkDevice _device, VkShaderEXT shader, size_t *pDataSize, void *pData)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_shader_object, shader_obj, shader);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_shader_object, shader_obj, shader);
const struct radv_physical_device *pdev = radv_device_physical(device);
const size_t size = radv_get_shader_object_size(shader_obj);
diff --git a/src/amd/vulkan/radv_video.c b/src/amd/vulkan/radv_video.c
index 9c673d97ab8..ad13fd6eee0 100644
--- a/src/amd/vulkan/radv_video.c
+++ b/src/amd/vulkan/radv_video.c
@@ -353,7 +353,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateVideoSessionKHR(VkDevice _device, const VkVideoSessionCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkVideoSessionKHR *pVideoSession)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
@@ -406,8 +406,8 @@ radv_CreateVideoSessionKHR(VkDevice _device, const VkVideoSessionCreateInfoKHR *
VKAPI_ATTR void VKAPI_CALL
radv_DestroyVideoSessionKHR(VkDevice _device, VkVideoSessionKHR _session, const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_video_session, vid, _session);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_video_session, vid, _session);
if (!_session)
return;
@@ -420,9 +420,9 @@ radv_CreateVideoSessionParametersKHR(VkDevice _device, const VkVideoSessionParam
const VkAllocationCallbacks *pAllocator,
VkVideoSessionParametersKHR *pVideoSessionParameters)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_video_session, vid, pCreateInfo->videoSession);
- RADV_FROM_HANDLE(radv_video_session_params, templ, pCreateInfo->videoSessionParametersTemplate);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_video_session, vid, pCreateInfo->videoSession);
+ VK_FROM_HANDLE(radv_video_session_params, templ, pCreateInfo->videoSessionParametersTemplate);
const struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
struct radv_video_session_params *params =
@@ -445,8 +445,8 @@ VKAPI_ATTR void VKAPI_CALL
radv_DestroyVideoSessionParametersKHR(VkDevice _device, VkVideoSessionParametersKHR _params,
const VkAllocationCallbacks *pAllocator)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_video_session_params, params, _params);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_video_session_params, params, _params);
vk_video_session_parameters_finish(&device->vk, &params->vk);
vk_free2(&device->vk.alloc, pAllocator, params);
@@ -456,7 +456,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_GetPhysicalDeviceVideoCapabilitiesKHR(VkPhysicalDevice physicalDevice, const VkVideoProfileInfoKHR *pVideoProfile,
VkVideoCapabilitiesKHR *pCapabilities)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
const struct video_codec_cap *cap = NULL;
switch (pVideoProfile->videoCodecOperation) {
@@ -667,8 +667,8 @@ radv_GetVideoSessionMemoryRequirementsKHR(VkDevice _device, VkVideoSessionKHR vi
uint32_t *pMemoryRequirementsCount,
VkVideoSessionMemoryRequirementsKHR *pMemoryRequirements)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_video_session, vid, videoSession);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_video_session, vid, videoSession);
const struct radv_physical_device *pdev = radv_device_physical(device);
uint32_t memory_type_bits = (1u << pdev->memory_properties.memoryTypeCount) - 1;
@@ -728,7 +728,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_UpdateVideoSessionParametersKHR(VkDevice _device, VkVideoSessionParametersKHR videoSessionParameters,
const VkVideoSessionParametersUpdateInfoKHR *pUpdateInfo)
{
- RADV_FROM_HANDLE(radv_video_session_params, params, videoSessionParameters);
+ VK_FROM_HANDLE(radv_video_session_params, params, videoSessionParameters);
return vk_video_session_parameters_update(&params->vk, pUpdateInfo);
}
@@ -745,7 +745,7 @@ VKAPI_ATTR VkResult VKAPI_CALL
radv_BindVideoSessionMemoryKHR(VkDevice _device, VkVideoSessionKHR videoSession, uint32_t videoSessionBindMemoryCount,
const VkBindVideoSessionMemoryInfoKHR *pBindSessionMemoryInfos)
{
- RADV_FROM_HANDLE(radv_video_session, vid, videoSession);
+ VK_FROM_HANDLE(radv_video_session, vid, videoSession);
for (unsigned i = 0; i < videoSessionBindMemoryCount; i++) {
switch (pBindSessionMemoryInfos[i].memoryBindIndex) {
@@ -2748,9 +2748,9 @@ ruvd_dec_message_create(struct radv_video_session *vid, void *ptr)
VKAPI_ATTR void VKAPI_CALL
radv_CmdBeginVideoCodingKHR(VkCommandBuffer commandBuffer, const VkVideoBeginCodingInfoKHR *pBeginInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- RADV_FROM_HANDLE(radv_video_session, vid, pBeginInfo->videoSession);
- RADV_FROM_HANDLE(radv_video_session_params, params, pBeginInfo->videoSessionParameters);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_video_session, vid, pBeginInfo->videoSession);
+ VK_FROM_HANDLE(radv_video_session_params, params, pBeginInfo->videoSessionParameters);
cmd_buffer->video.vid = vid;
cmd_buffer->video.params = params;
@@ -2832,7 +2832,7 @@ radv_uvd_cmd_reset(struct radv_cmd_buffer *cmd_buffer)
VKAPI_ATTR void VKAPI_CALL
radv_CmdControlVideoCodingKHR(VkCommandBuffer commandBuffer, const VkVideoCodingControlInfoKHR *pCodingControlInfo)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_physical_device *pdev = radv_device_physical(device);
@@ -2852,7 +2852,7 @@ radv_CmdEndVideoCodingKHR(VkCommandBuffer commandBuffer, const VkVideoEndCodingI
static void
radv_uvd_decode_video(struct radv_cmd_buffer *cmd_buffer, const VkVideoDecodeInfoKHR *frame_info)
{
- RADV_FROM_HANDLE(radv_buffer, src_buffer, frame_info->srcBuffer);
+ VK_FROM_HANDLE(radv_buffer, src_buffer, frame_info->srcBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_video_session *vid = cmd_buffer->video.vid;
@@ -2907,7 +2907,7 @@ radv_uvd_decode_video(struct radv_cmd_buffer *cmd_buffer, const VkVideoDecodeInf
static void
radv_vcn_decode_video(struct radv_cmd_buffer *cmd_buffer, const VkVideoDecodeInfoKHR *frame_info)
{
- RADV_FROM_HANDLE(radv_buffer, src_buffer, frame_info->srcBuffer);
+ VK_FROM_HANDLE(radv_buffer, src_buffer, frame_info->srcBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
struct radv_video_session *vid = cmd_buffer->video.vid;
@@ -2992,7 +2992,7 @@ radv_vcn_decode_video(struct radv_cmd_buffer *cmd_buffer, const VkVideoDecodeInf
VKAPI_ATTR void VKAPI_CALL
radv_CmdDecodeVideoKHR(VkCommandBuffer commandBuffer, const VkVideoDecodeInfoKHR *frame_info)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_physical_device *pdev = radv_device_physical(device);
diff --git a/src/amd/vulkan/radv_wsi.c b/src/amd/vulkan/radv_wsi.c
index 8cd7230950d..078553724b4 100644
--- a/src/amd/vulkan/radv_wsi.c
+++ b/src/amd/vulkan/radv_wsi.c
@@ -36,7 +36,7 @@
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
radv_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
- RADV_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
+ VK_FROM_HANDLE(radv_physical_device, pdev, physicalDevice);
const struct radv_instance *instance = radv_physical_device_instance(pdev);
return vk_instance_get_proc_addr_unchecked(&instance->vk, pName);
}
@@ -44,8 +44,8 @@ radv_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
static void
radv_wsi_set_memory_ownership(VkDevice _device, VkDeviceMemory _mem, VkBool32 ownership)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
+ VK_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device_memory, mem, _mem);
if (device->use_global_bo_list) {
device->ws->buffer_make_resident(device->ws, mem->bo, ownership);
@@ -55,7 +55,7 @@ radv_wsi_set_memory_ownership(VkDevice _device, VkDeviceMemory _mem, VkBool32 ow
static VkQueue
radv_wsi_get_prime_blit_queue(VkDevice _device)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
+ VK_FROM_HANDLE(radv_device, device, _device);
struct radv_physical_device *pdev = radv_device_physical(device);
const struct radv_instance *instance = radv_physical_device_instance(pdev);