Diffstat (limited to 'drivers/accel/ivpu/ivpu_jsm_msg.c')
-rw-r--r-- | drivers/accel/ivpu/ivpu_jsm_msg.c | 259
1 file changed, 258 insertions(+), 1 deletion(-)
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 8cea0dd731b9..e8dd73d947e4 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@@ -281,3 +281,260 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
return ivpu_hw_wait_for_idle(vdev);
}
+
+int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
+ u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_create_cmdq.host_ssid = ctx_id;
+ req.payload.hws_create_cmdq.process_id = pid;
+ req.payload.hws_create_cmdq.engine_idx = engine;
+ req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
+ req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
+ req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
+ req.payload.hws_create_cmdq.cmdq_size = cmdq_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
+ req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
+ u64 cmdq_base, u32 cmdq_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
+ struct vpu_jsm_msg resp;
+ int ret = 0;
+
+ req.payload.hws_register_db.db_id = db_id;
+ req.payload.hws_register_db.host_ssid = ctx_id;
+ req.payload.hws_register_db.cmdq_id = cmdq_id;
+ req.payload.hws_register_db.cmdq_base = cmdq_base;
+ req.payload.hws_register_db.cmdq_size = cmdq_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine >= VPU_ENGINE_NB)
+ return -EINVAL;
+
+ req.payload.hws_resume_engine.engine_idx = engine;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
+ u32 priority)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
+ req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
+ req.payload.hws_set_context_sched_properties.priority_band = priority;
+ req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
+ req.payload.hws_set_context_sched_properties.in_process_priority = 0;
+ req.payload.hws_set_context_sched_properties.context_quantum = 20000;
+ req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
+ req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
+ u64 vpu_log_buffer_va)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
+ req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
+ req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
+ req.payload.hws_set_scheduling_log.notify_index = 0;
+ req.payload.hws_set_scheduling_log.enable_extra_events =
+ ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ /* Idle */
+ req.payload.hws_priority_band_setup.grace_period[0] = 0;
+ req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
+ req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
+ /* Normal */
+ req.payload.hws_priority_band_setup.grace_period[1] = 50000;
+ req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
+ req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
+ /* Focus */
+ req.payload.hws_priority_band_setup.grace_period[2] = 50000;
+ req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
+ req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
+ /* Realtime */
+ req.payload.hws_priority_band_setup.grace_period[3] = 0;
+ req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
+ req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
+
+ req.payload.hws_priority_band_setup.normal_band_percentage = 10;
+
+ ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
+ u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
+ req.payload.metric_streamer_start.sampling_rate = sampling_rate;
+ req.payload.metric_streamer_start.buffer_addr = buffer_addr;
+ req.payload.metric_streamer_start.buffer_size = buffer_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
+ u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
+ req.payload.metric_streamer_update.buffer_addr = buffer_addr;
+ req.payload.metric_streamer_update.buffer_size = buffer_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
+ return ret;
+ }
+
+ if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
+ ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
+ resp.payload.metric_streamer_done.bytes_written, buffer_size);
+ return -EOVERFLOW;
+ }
+
+ *bytes_written = resp.payload.metric_streamer_done.bytes_written;
+
+ return ret;
+}
+
+int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
+ u64 buffer_size, u32 *sample_size, u64 *info_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
+ req.payload.metric_streamer_start.buffer_addr = buffer_addr;
+ req.payload.metric_streamer_start.buffer_size = buffer_size;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
+ return ret;
+ }
+
+ if (!resp.payload.metric_streamer_done.sample_size) {
+ ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
+ return -EBADMSG;
+ }
+
+ if (sample_size)
+ *sample_size = resp.payload.metric_streamer_done.sample_size;
+ if (info_size)
+ *info_size = resp.payload.metric_streamer_done.bytes_written;
+
+ return ret;
+}
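For context, a minimal sketch of how a caller might chain the HWS command-queue helpers added by this patch: create a queue, register its doorbell, then set its scheduling properties, tearing the queue down again on failure. This is an illustration only, not part of the patch; the helper name example_hws_cmdq_setup, the "ivpu_jsm_msg.h" include, and all parameter values (engine 0, priority band 1, pid 0) are assumptions chosen for the example.

	/* Illustrative sketch only -- not part of this commit. */
	#include "ivpu_drv.h"
	#include "ivpu_jsm_msg.h"	/* assumed header declaring the JSM helpers above */

	static int example_hws_cmdq_setup(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					  u32 db_id, u32 pid, u64 cmdq_base, u32 cmdq_size)
	{
		int ret;

		/* 1. Ask the firmware scheduler to create the command queue for this context.
		 * The cmdq_group and engine index (0) are arbitrary example values.
		 */
		ret = ivpu_jsm_hws_create_cmdq(vdev, ctx_id, cmdq_id, cmdq_id,
					       pid, 0, cmdq_base, cmdq_size);
		if (ret)
			return ret;

		/* 2. Register a doorbell so jobs placed at cmdq_base can be kicked. */
		ret = ivpu_jsm_hws_register_db(vdev, ctx_id, cmdq_id, db_id, cmdq_base, cmdq_size);
		if (ret)
			goto err_destroy_cmdq;

		/* 3. Assign the queue a priority band (1 = normal in this example). */
		ret = ivpu_jsm_hws_set_context_sched_properties(vdev, ctx_id, cmdq_id, 1);
		if (ret)
			goto err_destroy_cmdq;

		return 0;

	err_destroy_cmdq:
		ivpu_jsm_hws_destroy_cmdq(vdev, ctx_id, cmdq_id);
		return ret;
	}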