author    Lucas De Marchi <lucas.demarchi@intel.com>    2024-11-04 22:48:10 -0800
committer Lucas De Marchi <lucas.demarchi@intel.com>    2024-11-04 22:48:10 -0800
commit    9f13697e5965878d9c92132f02e84406c127d961 (patch)
tree      33fd10d1515088c8fc9b905b55f02ade28d1666f
parent    f9eea2e05e38aca32a1dda21906d035f10024530 (diff)
2024y-11m-05d-06h-47m-12s UTC: drm-tip rerere cache update
git version 2.47.0
-rw-r--r--  rr-cache/73ff56776b8953effc8726c175203162435823e3/preimage     1867
-rw-r--r--  rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/postimage.1   201
-rw-r--r--  rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage      205
-rw-r--r--  rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage.1    205
-rw-r--r--  rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/postimage     202
-rw-r--r--  rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/preimage      206
-rw-r--r--  rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/postimage    1355
-rw-r--r--  rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/preimage     1371
8 files changed, 5612 insertions, 0 deletions
diff --git a/rr-cache/73ff56776b8953effc8726c175203162435823e3/preimage b/rr-cache/73ff56776b8953effc8726c175203162435823e3/preimage
new file mode 100644
index 000000000000..f0b5e33deb6b
--- /dev/null
+++ b/rr-cache/73ff56776b8953effc8726c175203162435823e3/preimage
@@ -0,0 +1,1867 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_ct.h"
+
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/fault-inject.h>
+
+#include <kunit/static_stub.h>
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_sriov_abi.h"
+#include "abi/guc_klvs_abi.h"
+#include "xe_bo.h"
+#include "xe_devcoredump.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_guc.h"
+#include "xe_guc_log.h"
+#include "xe_guc_relay.h"
+#include "xe_guc_submit.h"
+#include "xe_map.h"
+#include "xe_pm.h"
+#include "xe_trace_guc.h"
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+enum {
+ /* Internal states, not error conditions */
+ CT_DEAD_STATE_REARM, /* 0x0001 */
+ CT_DEAD_STATE_CAPTURE, /* 0x0002 */
+
+ /* Error conditions */
+ CT_DEAD_SETUP, /* 0x0004 */
+ CT_DEAD_H2G_WRITE, /* 0x0008 */
+ CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */
+ CT_DEAD_G2H_READ, /* 0x0020 */
+ CT_DEAD_G2H_RECV, /* 0x0040 */
+ CT_DEAD_G2H_RELEASE, /* 0x0080 */
+ CT_DEAD_DEADLOCK, /* 0x0100 */
+ CT_DEAD_PROCESS_FAILED, /* 0x0200 */
+ CT_DEAD_FAST_G2H, /* 0x0400 */
+ CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */
+ CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
+ CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
+ CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
+};
+
+static void ct_dead_worker_func(struct work_struct *w);
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
+
+#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
+#else
+#define CT_DEAD(ct, ctb, reason) \
+ do { \
+ struct guc_ctb *_ctb = (ctb); \
+ if (_ctb) \
+ _ctb->info.broken = true; \
+ } while (0)
+#endif
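+
+/*
+ * Illustrative note (not from the original source): on CONFIG_DRM_XE_DEBUG
+ * builds, e.g. CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE) marks the H2G channel
+ * broken, sets bit CT_DEAD_H2G_WRITE (0x0008) in ct->dead.reason and, on the
+ * first such error, captures snapshots and queues the dead-CT dump worker;
+ * on non-debug builds it only marks the CTB broken.
+ */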
+
+/* Used when a CT send wants to block and / or receive data */
+struct g2h_fence {
+ u32 *response_buffer;
+ u32 seqno;
+ u32 response_data;
+ u16 response_len;
+ u16 error;
+ u16 hint;
+ u16 reason;
+ bool retry;
+ bool fail;
+ bool done;
+};
+
+static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
+{
+ g2h_fence->response_buffer = response_buffer;
+ g2h_fence->response_data = 0;
+ g2h_fence->response_len = 0;
+ g2h_fence->fail = false;
+ g2h_fence->retry = false;
+ g2h_fence->done = false;
+ g2h_fence->seqno = ~0x0;
+}
+
+static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
+{
+ return g2h_fence->seqno == ~0x0;
+}
+
+static struct xe_guc *
+ct_to_guc(struct xe_guc_ct *ct)
+{
+ return container_of(ct, struct xe_guc, ct);
+}
+
+static struct xe_gt *
+ct_to_gt(struct xe_guc_ct *ct)
+{
+ return container_of(ct, struct xe_gt, uc.guc.ct);
+}
+
+static struct xe_device *
+ct_to_xe(struct xe_guc_ct *ct)
+{
+ return gt_to_xe(ct_to_gt(ct));
+}
+
+/**
+ * DOC: GuC CTB Blob
+ *
+ * We allocate single blob to hold both CTB descriptors and buffers:
+ *
+ * +--------+-----------------------------------------------+------+
+ * | offset | contents | size |
+ * +========+===============================================+======+
+ * | 0x0000 | H2G CTB Descriptor (send) | |
+ * +--------+-----------------------------------------------+ 4K |
+ * | 0x0800 | G2H CTB Descriptor (g2h) | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | H2G CT Buffer (send) | n*4K |
+ * | | | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | G2H CT Buffer (g2h) | m*4K |
+ * | + n*4K | | |
+ * +--------+-----------------------------------------------+------+
+ *
+ * Size of each ``CT Buffer`` must be a multiple of 4K.
+ * We don't expect too many messages in flight at any time, unless we are
+ * using GuC submission. In that case each request requires a minimum of
+ * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
+ * is enough space to avoid backpressure on the driver. We increase the size
+ * of the receive buffer (relative to the send) to ensure a G2H response
+ * CTB has a landing spot.
+ *
+ * In addition to submissions, the G2H buffer needs to be able to hold
+ * enough space for recoverable page fault notifications. The number of
+ * page faults is interrupt driven and can be as much as the number of
+ * compute resources available. However, most of the actual work for these
+ * is in a separate page fault worker thread. Therefore we only need to
+ * make sure the queue has enough space to handle all of the submissions
+ * and responses and an extra buffer for incoming page faults.
+ */
+
+#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_SIZE (SZ_4K)
+#define CTB_G2H_BUFFER_SIZE (SZ_128K)
+#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
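+
+/*
+ * Illustrative arithmetic (not from the original source, assuming the
+ * descriptor struct fits within its 2K alignment): guc_ct_size() below works
+ * out to 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE =
+ * 4K + 4K + 128K = 136K, with half of the G2H buffer (64K) set aside as
+ * G2H_ROOM_BUFFER_SIZE reserved space for responses to H2G requests.
+ */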
+
+/**
+ * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
+ * CT command queue
+ * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
+ *
+ * Observation is that a 4KiB buffer full of commands takes a little over a
+ * second to process. Use that to calculate maximum time to process a full CT
+ * command queue.
+ *
+ * Return: Maximum time to process a full CT queue in jiffies.
+ */
+long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
+ return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
+}
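+
+/*
+ * Worked example (illustrative only): with CTB_H2G_BUFFER_SIZE == SZ_4K the
+ * expression above evaluates to (SZ_4K / SZ_4K) * HZ == HZ, i.e. one second,
+ * matching the observed processing time; a hypothetical 16K buffer would
+ * give 4 * HZ.
+ */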
+
+static size_t guc_ct_size(void)
+{
+ return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
+ CTB_G2H_BUFFER_SIZE;
+}
+
+static void guc_ct_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_ct *ct = arg;
+
+ destroy_workqueue(ct->g2h_wq);
+ xa_destroy(&ct->fence_lookup);
+}
+
+static void receive_g2h(struct xe_guc_ct *ct);
+static void g2h_worker_func(struct work_struct *w);
+static void safe_mode_worker_func(struct work_struct *w);
+
+static void primelockdep(struct xe_guc_ct *ct)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&ct->lock);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+int xe_guc_ct_init(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ int err;
+
+ xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
+
+ ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
+ if (!ct->g2h_wq)
+ return -ENOMEM;
+
+ spin_lock_init(&ct->fast_lock);
+ xa_init(&ct->fence_lookup);
+ INIT_WORK(&ct->g2h_worker, g2h_worker_func);
+ INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ spin_lock_init(&ct->dead.lock);
+ INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+#endif
+ init_waitqueue_head(&ct->wq);
+ init_waitqueue_head(&ct->g2h_fence_wq);
+
+ err = drmm_mutex_init(&xe->drm, &ct->lock);
+ if (err)
+ return err;
+
+ primelockdep(ct);
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ct->bo = bo;
+
+ err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
+ if (err)
+ return err;
+
+ xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+ ct->state = XE_GUC_CT_STATE_DISABLED;
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
+
+#define desc_read(xe_, guc_ctb__, field_) \
+ xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
+ struct guc_ct_buffer_desc, field_)
+
+#define desc_write(xe_, guc_ctb__, field_, val_) \
+ xe_map_wr_field(xe_, &guc_ctb__->desc, 0, \
+ struct guc_ct_buffer_desc, field_, val_)
+
+static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
+ struct iosys_map *map)
+{
+ h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
+ h2g->info.resv_space = 0;
+ h2g->info.tail = 0;
+ h2g->info.head = 0;
+ h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
+ h2g->info.size) -
+ h2g->info.resv_space;
+ h2g->info.broken = false;
+
+ h2g->desc = *map;
+ xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
+
+ h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
+}
+
+static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
+ struct iosys_map *map)
+{
+ g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
+ g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
+ g2h->info.head = 0;
+ g2h->info.tail = 0;
+ g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
+ g2h->info.size) -
+ g2h->info.resv_space;
+ g2h->info.broken = false;
+
+ g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
+ xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
+
+ g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
+ CTB_H2G_BUFFER_SIZE);
+}
+
+static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
+{
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 desc_addr, ctb_addr, size;
+ int err;
+
+ desc_addr = xe_bo_ggtt_addr(ct->bo);
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
+ size = ct->ctbs.h2g.info.size * sizeof(u32);
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (err)
+ return err;
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
+ ctb_addr);
+ if (err)
+ return err;
+
+ return xe_guc_self_cfg32(guc,
+ GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
+ size);
+}
+
+static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
+{
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 desc_addr, ctb_addr, size;
+ int err;
+
+ desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
+ CTB_H2G_BUFFER_SIZE;
+ size = ct->ctbs.g2h.info.size * sizeof(u32);
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (err)
+ return err;
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
+ ctb_addr);
+ if (err)
+ return err;
+
+ return xe_guc_self_cfg32(guc,
+ GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
+ size);
+}
+
+static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
+{
+ u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ GUC_ACTION_HOST2GUC_CONTROL_CTB),
+ FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
+ enable ? GUC_CTB_CONTROL_ENABLE :
+ GUC_CTB_CONTROL_DISABLE),
+ };
+ int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+ enum xe_guc_ct_state state)
+{
+ mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
+ spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
+
+ xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
+ state == XE_GUC_CT_STATE_STOPPED);
+
+ if (ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
+ ct->g2h_outstanding = 0;
+ ct->state = state;
+
+ spin_unlock_irq(&ct->fast_lock);
+
+ /*
+ * Lockdep doesn't like this under the fast lock, and the destroy only
+ * needs to be serialized with the send path, which the ct lock provides.
+ */
+ xa_destroy(&ct->fence_lookup);
+
+ mutex_unlock(&ct->lock);
+}
+
+static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
+{
+ return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
+}
+
+static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
+{
+ if (!ct_needs_safe_mode(ct))
+ return false;
+
+ queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
+ return true;
+}
+
+static void safe_mode_worker_func(struct work_struct *w)
+{
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
+
+ receive_g2h(ct);
+
+ if (!ct_restart_safe_mode_worker(ct))
+ xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
+}
+
+static void ct_enter_safe_mode(struct xe_guc_ct *ct)
+{
+ if (ct_restart_safe_mode_worker(ct))
+ xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
+}
+
+static void ct_exit_safe_mode(struct xe_guc_ct *ct)
+{
+ if (cancel_delayed_work_sync(&ct->safe_mode_worker))
+ xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
+}
+
+int xe_guc_ct_enable(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ int err;
+
+ xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
+
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
+ guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
+ guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
+
+ err = guc_ct_ctb_h2g_register(ct);
+ if (err)
+ goto err_out;
+
+ err = guc_ct_ctb_g2h_register(ct);
+ if (err)
+ goto err_out;
+
+ err = guc_ct_control_toggle(ct, true);
+ if (err)
+ goto err_out;
+
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
+
+ smp_mb();
+ wake_up_all(&ct->wq);
+ xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
+
+ if (ct_needs_safe_mode(ct))
+ ct_enter_safe_mode(ct);
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ /*
+ * The CT has now been reset so the dumper can be re-armed
+ * after any existing dead state has been dumped.
+ */
+ spin_lock_irq(&ct->dead.lock);
+ if (ct->dead.reason)
+ ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+ spin_unlock_irq(&ct->dead.lock);
+#endif
+
+ return 0;
+
+err_out:
+ xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
+ CT_DEAD(ct, NULL, SETUP);
+
+ return err;
+}
+
+static void stop_g2h_handler(struct xe_guc_ct *ct)
+{
+ cancel_work_sync(&ct->g2h_worker);
+}
+
+/**
+ * xe_guc_ct_disable - Set GuC to disabled state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
+ * in this transition.
+ */
+void xe_guc_ct_disable(struct xe_guc_ct *ct)
+{
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+ ct_exit_safe_mode(ct);
+ stop_g2h_handler(ct);
+}
+
+/**
+ * xe_guc_ct_stop - Set GuC to stopped state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
+ */
+void xe_guc_ct_stop(struct xe_guc_ct *ct)
+{
+ xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+ stop_g2h_handler(ct);
+}
+
+static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
+{
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+
+ lockdep_assert_held(&ct->lock);
+
+ if (cmd_len > h2g->info.space) {
+ h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+
+ if (h2g->info.head > h2g->info.size) {
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 desc_status = desc_read(xe, h2g, status);
+
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+
+ xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
+ h2g->info.head, h2g->info.size);
+ CT_DEAD(ct, h2g, H2G_HAS_ROOM);
+ return false;
+ }
+
+ h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
+ h2g->info.size) -
+ h2g->info.resv_space;
+ if (cmd_len > h2g->info.space)
+ return false;
+ }
+
+ return true;
+}
+
+static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
+{
+ if (!g2h_len)
+ return true;
+
+ lockdep_assert_held(&ct->fast_lock);
+
+ return ct->ctbs.g2h.info.space > g2h_len;
+}
+
+static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
+{
+ lockdep_assert_held(&ct->lock);
+
+ if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
+{
+ lockdep_assert_held(&ct->lock);
+ ct->ctbs.h2g.info.space -= cmd_len;
+}
+
+static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
+{
+ xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+ xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+ (g2h_len && num_g2h));
+
+ if (g2h_len) {
+ lockdep_assert_held(&ct->fast_lock);
+
+ if (!ct->g2h_outstanding)
+ xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
+ ct->ctbs.g2h.info.space -= g2h_len;
+ ct->g2h_outstanding += num_g2h;
+ }
+}
+
+static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
+{
+ bool bad = false;
+
+ lockdep_assert_held(&ct->fast_lock);
+
+ bad = ct->ctbs.g2h.info.space + g2h_len >
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
+ bad |= !ct->g2h_outstanding;
+
+ if (bad) {
+ xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
+ ct->ctbs.g2h.info.space, g2h_len,
+ ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
+ ct->ctbs.g2h.info.space + g2h_len,
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
+ ct->g2h_outstanding);
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
+ return;
+ }
+
+ ct->ctbs.g2h.info.space += g2h_len;
+ if (!--ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
+}
+
+static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
+{
+ spin_lock_irq(&ct->fast_lock);
+ __g2h_release_space(ct, g2h_len);
+ spin_unlock_irq(&ct->fast_lock);
+}
+
+#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
+
+static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 ct_fence_value, bool want_response)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+ u32 cmd[H2G_CT_HEADERS];
+ u32 tail = h2g->info.tail;
+ u32 full_len;
+ struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
+ tail * sizeof(u32));
+ u32 desc_status;
+
+ full_len = len + GUC_CTB_HDR_LEN;
+
+ lockdep_assert_held(&ct->lock);
+ xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
+
+ desc_status = desc_read(xe, h2g, status);
+ if (desc_status) {
+ xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, h2g, tail);
+ u32 desc_head = desc_read(xe, h2g, head);
+
+ if (tail != desc_tail) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
+ goto corrupted;
+ }
+
+ if (tail > h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
+ tail, h2g->info.size);
+ goto corrupted;
+ }
+
+ if (desc_head >= h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
+ desc_head, h2g->info.size);
+ goto corrupted;
+ }
+ }
+
+ /* Command will wrap, zero fill (NOPs), return and check credits again */
+ if (tail + full_len > h2g->info.size) {
+ xe_map_memset(xe, &map, 0, 0,
+ (h2g->info.size - tail) * sizeof(u32));
+ h2g_reserve_space(ct, (h2g->info.size - tail));
+ h2g->info.tail = 0;
+ desc_write(xe, h2g, tail, h2g->info.tail);
+
+ return -EAGAIN;
+ }
+
+ /*
+ * dw0: CT header (including fence)
+ * dw1: HXG header (including action code)
+ * dw2+: action data
+ */
+ cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
+ FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
+ FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
+ if (want_response) {
+ cmd[1] =
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
+ GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ } else {
+ cmd[1] =
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
+ GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ }
+
+ /* H2G header in cmd[1] replaces action[0] so: */
+ --len;
+ ++action;
+
+ /* Write H2G, ensuring it is visible before the descriptor update */
+ xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
+ xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
+ xe_device_wmb(xe);
+
+ /* Update local copies */
+ h2g->info.tail = (tail + full_len) % h2g->info.size;
+ h2g_reserve_space(ct, full_len);
+
+ /* Update descriptor */
+ desc_write(xe, h2g, tail, h2g->info.tail);
+
+ trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
+ desc_read(xe, h2g, head), h2g->info.tail);
+
+ return 0;
+
+corrupted:
+ CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
+ return -EPIPE;
+}
+
+/*
+ * The CT protocol accepts a 16-bit fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
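+
+/*
+ * Illustrative example (not from the original source): a raw counter value of
+ * 5 becomes fence 0x0005 for a tracked (blocking REQUEST) send, or 0x8005 for
+ * an untracked FAST_REQUEST once CT_SEQNO_UNTRACKED is set.
+ */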
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
+
+static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
+ u32 len, u32 g2h_len, u32 num_g2h,
+ struct g2h_fence *g2h_fence)
+{
+ struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
+ u16 seqno;
+ int ret;
+
+ xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ xe_gt_assert(gt, !num_g2h || !g2h_fence);
+ xe_gt_assert(gt, !g2h_len || num_g2h);
+ xe_gt_assert(gt, g2h_len || !num_g2h);
+ lockdep_assert_held(&ct->lock);
+
+ if (unlikely(ct->ctbs.h2g.info.broken)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (ct->state == XE_GUC_CT_STATE_DISABLED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+ ret = -ECANCELED;
+ goto out;
+ }
+
+ xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+
+ if (g2h_fence) {
+ g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
+ num_g2h = 1;
+
+ if (g2h_fence_needs_alloc(g2h_fence)) {
+ g2h_fence->seqno = next_ct_seqno(ct, true);
+ ret = xa_err(xa_store(&ct->fence_lookup,
+ g2h_fence->seqno, g2h_fence,
+ GFP_ATOMIC));
+ if (ret)
+ goto out;
+ }
+
+ seqno = g2h_fence->seqno;
+ } else {
+ seqno = next_ct_seqno(ct, false);
+ }
+
+ if (g2h_len)
+ spin_lock_irq(&ct->fast_lock);
+retry:
+ ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
+ if (unlikely(ret))
+ goto out_unlock;
+
+ ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
+ if (unlikely(ret)) {
+ if (ret == -EAGAIN)
+ goto retry;
+ goto out_unlock;
+ }
+
+ __g2h_reserve_space(ct, g2h_len, num_g2h);
+ xe_guc_notify(ct_to_guc(ct));
+out_unlock:
+ if (g2h_len)
+ spin_unlock_irq(&ct->fast_lock);
+out:
+ return ret;
+}
+
+static void kick_reset(struct xe_guc_ct *ct)
+{
+ xe_gt_reset_async(ct_to_gt(ct));
+}
+
+static int dequeue_one_g2h(struct xe_guc_ct *ct);
+
+static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h,
+ struct g2h_fence *g2h_fence)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ unsigned int sleep_period_ms = 1;
+ int ret;
+
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ lockdep_assert_held(&ct->lock);
+ xe_device_assert_mem_access(ct_to_xe(ct));
+
+try_again:
+ ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
+ g2h_fence);
+
+ /*
+ * We wait to try to restore credits for about 1 second before bailing.
+ * In the case of H2G credits we have no choice but to wait for the
+ * GuC to consume H2Gs in the channel, so we use a wait / sleep loop. In
+ * the case of G2H we process any G2H in the channel, hopefully freeing
+ * credits as we consume the G2H messages.
+ */
+ if (unlikely(ret == -EBUSY &&
+ !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+
+ if (sleep_period_ms == 1024)
+ goto broken;
+
+ trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
+ h2g->info.size,
+ h2g->info.space,
+ len + GUC_CTB_HDR_LEN);
+ msleep(sleep_period_ms);
+ sleep_period_ms <<= 1;
+
+ goto try_again;
+ } else if (unlikely(ret == -EBUSY)) {
+ struct xe_device *xe = ct_to_xe(ct);
+ struct guc_ctb *g2h = &ct->ctbs.g2h;
+
+ trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
+ desc_read(xe, g2h, tail),
+ g2h->info.size,
+ g2h->info.space,
+ g2h_fence ?
+ GUC_CTB_HXG_MSG_MAX_LEN :
+ g2h_len);
+
+#define g2h_avail(ct) \
+ (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
+ if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
+ g2h_avail(ct), HZ))
+ goto broken;
+#undef g2h_avail
+
+ ret = dequeue_one_g2h(ct);
+ if (ret < 0) {
+ if (ret != -ECANCELED)
+ xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
+ ERR_PTR(ret));
+ goto broken;
+ }
+
+ goto try_again;
+ }
+
+ return ret;
+
+broken:
+ xe_gt_err(gt, "No forward progress on H2G, reset required\n");
+ CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
+
+ return -EDEADLK;
+}
+
+static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
+{
+ int ret;
+
+ xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
+
+ mutex_lock(&ct->lock);
+ ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
+ mutex_unlock(&ct->lock);
+
+ return ret;
+}
+
+int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h)
+{
+ int ret;
+
+ ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ return ret;
+}
+
+int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h)
+{
+ int ret;
+
+ ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ return ret;
+}
+
+int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
+{
+ int ret;
+
+ lockdep_assert_held(&ct->lock);
+
+ ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ return ret;
+}
+
+/*
+ * Check if a GT reset is in progress or will occur, and if the GT reset brought
+ * the CT back up. Randomly picking 5 seconds as an upper limit for a GT reset.
+ */
+static bool retry_failure(struct xe_guc_ct *ct, int ret)
+{
+ if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
+ return false;
+
+#define ct_alive(ct) \
+ (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
+ !ct->ctbs.g2h.info.broken)
+ if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+ return false;
+#undef ct_alive
+
+ return true;
+}
+
+static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buffer, bool no_fail)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct g2h_fence g2h_fence;
+ int ret = 0;
+
+ /*
+ * We use a fence to implement blocking sends / receiving response data.
+ * The seqno of the fence is sent in the H2G, returned in the G2H, and
+ * an xarray is used as the storage medium with the seqno being the key.
+ * Fields in the fence hold success, failure, retry status and the
+ * response data. Safe to allocate on the stack as the xarray is the
+ * only reference and it cannot be present after this function exits.
+ */
+retry:
+ g2h_fence_init(&g2h_fence, response_buffer);
+retry_same_fence:
+ ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
+ if (unlikely(ret == -ENOMEM)) {
+ /* Retry allocation w/ GFP_KERNEL */
+ ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+ &g2h_fence, GFP_KERNEL));
+ if (ret)
+ return ret;
+
+ goto retry_same_fence;
+ } else if (unlikely(ret)) {
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ if (no_fail && retry_failure(ct, ret))
+ goto retry_same_fence;
+
+ if (!g2h_fence_needs_alloc(&g2h_fence))
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
+
+ return ret;
+ }
+
+ ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
+<<<<<<<
+=======
+ if (!ret) {
+ LNL_FLUSH_WORK(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+ ret = 1;
+ }
+ }
+
+ /*
+ * Occasionally it is seen that the G2H worker starts running after a delay of more than
+ * a second even after being queued and activated by the Linux workqueue subsystem. This
+ * leads to a G2H timeout error. The root cause of the issue lies with the scheduling
+ * latency of the Lunarlake Hybrid CPU. The issue disappears if we disable the Lunarlake
+ * atom cores from the BIOS, and this is beyond the xe KMD.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+>>>>>>>
+ if (!ret) {
+ LNL_FLUSH_WORK(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+ ret = 1;
+ }
+ }
+
+ /*
+ * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+ * the stack, since we have no clue if it will fire after the timeout before we can erase
+ * from the xa. Also we have some dependent loads and stores below for which we need the
+ * correct ordering, and we lack the needed barriers.
+ */
+ mutex_lock(&ct->lock);
+ if (!ret) {
+ xe_gt_err(gt, "Timed out waiting for G2H, fence %u, action %04x, done %s",
+ g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
+ mutex_unlock(&ct->lock);
+ return -ETIME;
+ }
+
+ if (g2h_fence.retry) {
+ xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
+ action[0], g2h_fence.reason);
+ mutex_unlock(&ct->lock);
+ goto retry;
+ }
+ if (g2h_fence.fail) {
+ xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
+ action[0], g2h_fence.error, g2h_fence.hint);
+ ret = -EIO;
+ }
+
+ if (ret > 0)
+ ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+ mutex_unlock(&ct->lock);
+
+ return ret;
+}
+
+/**
+ * xe_guc_ct_send_recv - Send and receive HXG to the GuC
+ * @ct: the &xe_guc_ct
+ * @action: the dword array with `HXG Request`_ message (can't be NULL)
+ * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
+ * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
+ *
+ * Send a `HXG Request`_ message to the GuC over CT communication channel and
+ * blocks until GuC replies with a `HXG Response`_ message.
+ *
+ * For non-blocking communication with GuC use xe_guc_ct_send().
+ *
+ * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
+ *
+ * Return: response length (in dwords) if &response_buffer was not NULL, or
+ * DATA0 from `HXG Response`_ if &response_buffer was NULL, or
+ * a negative error code on failure.
+ */
+int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buffer)
+{
+ KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
+ return guc_ct_send_recv(ct, action, len, response_buffer, false);
+}
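+
+/*
+ * Illustrative usage sketch (not part of the original file; the action code
+ * and payload below are hypothetical): callers pass the raw GuC action code
+ * in action[0] (h2g_write() packs it into the HXG header) followed by any
+ * payload dwords, then block for the reply:
+ *
+ *	u32 action[] = { SOME_HYPOTHETICAL_ACTION_CODE, payload_dw };
+ *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
+ *
+ * A negative ret is an error; otherwise ret carries DATA0 from the response,
+ * or the response length in dwords if a response buffer was supplied.
+ */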
+
+int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
+ u32 len, u32 *response_buffer)
+{
+ return guc_ct_send_recv(ct, action, len, response_buffer, true);
+}
+
+static u32 *msg_to_hxg(u32 *msg)
+{
+ return msg + GUC_CTB_MSG_MIN_LEN;
+}
+
+static u32 msg_len_to_hxg_len(u32 len)
+{
+ return len - GUC_CTB_MSG_MIN_LEN;
+}
+
+static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+
+ lockdep_assert_held(&ct->lock);
+
+ switch (action) {
+ case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ g2h_release_space(ct, len);
+ }
+
+ return 0;
+}
+
+static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
+ u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
+ struct g2h_fence *g2h_fence;
+
+ lockdep_assert_held(&ct->lock);
+
+ /*
+ * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
+ * Those messages should never fail, so if we do get an error back it
+ * means we're likely doing an illegal operation and the GuC is
+ * rejecting it. We have no way to inform the code that submitted the
+ * H2G that the message was rejected, so we need to escalate the
+ * failure to trigger a reset.
+ */
+ if (fence & CT_SEQNO_UNTRACKED) {
+ if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
+ xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
+ fence,
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
+ else
+ xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
+ type, fence);
+ CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
+
+ return -EPROTO;
+ }
+
+ g2h_fence = xa_erase(&ct->fence_lookup, fence);
+ if (unlikely(!g2h_fence)) {
+ /* Don't tear down channel, as send could've timed out */
+ /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
+ xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
+ g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+ return 0;
+ }
+
+ xe_gt_assert(gt, fence == g2h_fence->seqno);
+
+ if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
+ g2h_fence->fail = true;
+ g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
+ g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
+ } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ g2h_fence->retry = true;
+ g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
+ } else if (g2h_fence->response_buffer) {
+ g2h_fence->response_len = hxg_len;
+ memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
+ } else {
+ g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
+ }
+
+ g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+
+ g2h_fence->done = true;
+ smp_mb();
+
+ wake_up_all(&ct->g2h_fence_wq);
+
+ return 0;
+}
+
+static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 origin, type;
+ int ret;
+
+ lockdep_assert_held(&ct->lock);
+
+ origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
+ if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
+ xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
+ origin);
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
+
+ return -EPROTO;
+ }
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
+ switch (type) {
+ case GUC_HXG_TYPE_EVENT:
+ ret = parse_g2h_event(ct, msg, len);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ ret = parse_g2h_response(ct, msg, len);
+ break;
+ default:
+ xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
+ type);
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
+
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action, adj_len;
+ u32 *payload;
+ int ret = 0;
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
+ return 0;
+
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
+ adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
+
+ switch (action) {
+ case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ ret = xe_guc_sched_done_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
+ ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
+ ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
+ /* Selftest only at the moment */
+ break;
+ case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = xe_guc_error_capture_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
+ /* FIXME: Handle this */
+ break;
+ case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
+ ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ ret = xe_guc_pagefault_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
+ ret = xe_guc_access_counter_notify_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
+ break;
+ case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
+ break;
+ case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
+ ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
+ break;
+ case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
+ ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
+ break;
+ default:
+ xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
+ }
+
+ if (ret) {
+ xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ action, ERR_PTR(ret));
+ CT_DEAD(ct, NULL, PROCESS_FAILED);
+ }
+
+ return 0;
+}
+
+static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct guc_ctb *g2h = &ct->ctbs.g2h;
+ u32 tail, head, len, desc_status;
+ s32 avail;
+ u32 action;
+ u32 *hxg;
+
+ xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ lockdep_assert_held(&ct->fast_lock);
+
+ if (ct->state == XE_GUC_CT_STATE_DISABLED)
+ return -ENODEV;
+
+ if (ct->state == XE_GUC_CT_STATE_STOPPED)
+ return -ECANCELED;
+
+ if (g2h->info.broken)
+ return -EPIPE;
+
+ xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+
+ desc_status = desc_read(xe, g2h, status);
+ if (desc_status) {
+ if (desc_status & GUC_CTB_STATUS_DISABLED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset. But should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
+ desc_status &= ~GUC_CTB_STATUS_DISABLED;
+ }
+
+ if (desc_status) {
+ xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, g2h, tail);
+ /*
+ u32 desc_head = desc_read(xe, g2h, head);
+
+ * info.head and desc_head are updated back-to-back at the end of
+ * this function and nowhere else. Hence, they cannot be different
+ * unless two g2h_read calls are running concurrently. Which is not
+ * possible because it is guarded by ct->fast_lock. And yet, some
+ * discrete platforms are regularly hitting this error :(.
+ *
+ * desc_head rolling backwards shouldn't cause any noticeable
+ * problems - just a delay in GuC being allowed to proceed past that
+ * point in the queue. So for now, just disable the error until it
+ * can be root caused.
+ *
+ if (g2h->info.head != desc_head) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT read: head was modified %u != %u\n",
+ desc_head, g2h->info.head);
+ goto corrupted;
+ }
+ */
+
+ if (g2h->info.head > g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
+ g2h->info.head, g2h->info.size);
+ goto corrupted;
+ }
+
+ if (desc_tail >= g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
+ desc_tail, g2h->info.size);
+ goto corrupted;
+ }
+ }
+
+ /* Calculate DW available to read */
+ tail = desc_read(xe, g2h, tail);
+ avail = tail - g2h->info.head;
+ if (unlikely(avail == 0))
+ return 0;
+
+ if (avail < 0)
+ avail += g2h->info.size;
+
+ /* Read header */
+ xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
+ sizeof(u32));
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
+ if (len > avail) {
+ xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
+ avail, len);
+ goto corrupted;
+ }
+
+ head = (g2h->info.head + 1) % g2h->info.size;
+ avail = len - 1;
+
+ /* Read G2H message */
+ if (avail + head > g2h->info.size) {
+ u32 avail_til_wrap = g2h->info.size - head;
+
+ xe_map_memcpy_from(xe, msg + 1,
+ &g2h->cmds, sizeof(u32) * head,
+ avail_til_wrap * sizeof(u32));
+ xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
+ &g2h->cmds, 0,
+ (avail - avail_til_wrap) * sizeof(u32));
+ } else {
+ xe_map_memcpy_from(xe, msg + 1,
+ &g2h->cmds, sizeof(u32) * head,
+ avail * sizeof(u32));
+ }
+
+ hxg = msg_to_hxg(msg);
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+
+ if (fast_path) {
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
+ return 0;
+
+ switch (action) {
+ case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ break; /* Process these in fast-path */
+ default:
+ return 0;
+ }
+ }
+
+ /* Update local / descriptor header */
+ g2h->info.head = (head + avail) % g2h->info.size;
+ desc_write(xe, g2h, head, g2h->info.head);
+
+ trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
+ action, len, g2h->info.head, tail);
+
+ return len;
+
+corrupted:
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
+ return -EPROTO;
+}
+
+static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 hxg_len = msg_len_to_hxg_len(len);
+ u32 *hxg = msg_to_hxg(msg);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
+ u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
+ int ret = 0;
+
+ switch (action) {
+ case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ ret = xe_guc_pagefault_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ __g2h_release_space(ct, len);
+ ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
+ adj_len);
+ break;
+ default:
+ xe_gt_warn(gt, "NOT_POSSIBLE");
+ }
+
+ if (ret) {
+ xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+ action, ERR_PTR(ret));
+ CT_DEAD(ct, NULL, FAST_G2H);
+ }
+}
+
+/**
+ * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
+ * @ct: GuC CT object
+ *
+ * Anything related to page faults is critical for performance, so process these
+ * critical G2H in the IRQ. This is safe as these handlers either just wake up
+ * waiters or queue another worker.
+ */
+void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ bool ongoing;
+ int len;
+
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
+ if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
+ return;
+
+ spin_lock(&ct->fast_lock);
+ do {
+ len = g2h_read(ct, ct->fast_msg, true);
+ if (len > 0)
+ g2h_fast_path(ct, ct->fast_msg, len);
+ } while (len > 0);
+ spin_unlock(&ct->fast_lock);
+
+ if (ongoing)
+ xe_pm_runtime_put(xe);
+}
+
+/* Returns less than zero on error, 0 on done, 1 on more available */
+static int dequeue_one_g2h(struct xe_guc_ct *ct)
+{
+ int len;
+ int ret;
+
+ lockdep_assert_held(&ct->lock);
+
+ spin_lock_irq(&ct->fast_lock);
+ len = g2h_read(ct, ct->msg, false);
+ spin_unlock_irq(&ct->fast_lock);
+ if (len <= 0)
+ return len;
+
+ ret = parse_g2h_msg(ct, ct->msg, len);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = process_g2h_msg(ct, ct->msg, len);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 1;
+}
+
+static void receive_g2h(struct xe_guc_ct *ct)
+{
+ bool ongoing;
+ int ret;
+
+ /*
+ * Normal users must always hold mem_access.ref around CT calls. However
+ * during the runtime pm callbacks we rely on CT to talk to the GuC, but
+ * at this stage we can't rely on mem_access.ref and even the
+ * callback_task will be different than current. For such cases we just
+ * need to ensure we always process the responses from any blocking
+ * ct_send requests or where we otherwise expect some response when
+ * initiated from those callbacks (which will need to wait for the below
+ * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
+ * the device has suspended to the point that the CT communication has
+ * been disabled.
+ *
+ * If we are inside the runtime pm callback, we can be the only task
+ * still issuing CT requests (since that requires having the
+ * mem_access.ref). It seems like it might in theory be possible to
+ * receive unsolicited events from the GuC just as we are
+ * suspending-resuming, but those will currently anyway be lost when
+ * eventually exiting from suspend, hence no need to wake up the device
+ * here. If we ever need something stronger than get_if_ongoing() then
+ * we need to be careful with blocking the pm callbacks from getting CT
+ * responses, if the worker here is blocked on those callbacks
+ * completing, creating a deadlock.
+ */
+ ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
+ if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
+ return;
+
+ do {
+ mutex_lock(&ct->lock);
+ ret = dequeue_one_g2h(ct);
+ mutex_unlock(&ct->lock);
+
+ if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
+ xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
+ CT_DEAD(ct, NULL, G2H_RECV);
+ kick_reset(ct);
+ }
+ } while (ret == 1);
+
+ if (ongoing)
+ xe_pm_runtime_put(ct_to_xe(ct));
+}
+
+static void g2h_worker_func(struct work_struct *w)
+{
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
+
+ receive_g2h(ct);
+}
+
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
+{
+ struct xe_guc_ct_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot)
+ return NULL;
+
+ if (ct->bo && want_ctb) {
+ snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
+ }
+
+ return snapshot;
+}
+
+static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+ struct guc_ctb_snapshot *snapshot)
+{
+ xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
+ sizeof(struct guc_ct_buffer_desc));
+ memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+}
+
+static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ drm_printf(p, "\tsize: %d\n", snapshot->info.size);
+ drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
+ drm_printf(p, "\thead: %d\n", snapshot->info.head);
+ drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
+ drm_printf(p, "\tspace: %d\n", snapshot->info.space);
+ drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
+ drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
+ drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
+ drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
+}
+
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc_ct_snapshot *snapshot;
+
+ snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
+ if (!snapshot) {
+ xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
+ return NULL;
+ }
+
+ if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
+ snapshot->ct_enabled = true;
+ snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
+ }
+
+ if (ct->bo && snapshot->ctb)
+ xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
+
+ return snapshot;
+}
+
+/**
+ * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
+ * @ct: GuC CT object.
+ *
+ * This can be printed out at a later stage, e.g. during dev_coredump
+ * analysis. This is safe to call from atomic context.
+ *
+ * Returns: a GuC CT snapshot object that must be freed by the caller
+ * by using `xe_guc_ct_snapshot_free`.
+ */
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
+{
+ return guc_ct_snapshot_capture(ct, true, true);
+}
+
+/**
+ * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
+ * @snapshot: GuC CT snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given GuC CT snapshot object.
+ */
+void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
+
+ if (snapshot->ct_enabled) {
+ drm_puts(p, "H2G CTB (all sizes in DW):\n");
+ guc_ctb_snapshot_print(&snapshot->h2g, p);
+
+ drm_puts(p, "G2H CTB (all sizes in DW):\n");
+ guc_ctb_snapshot_print(&snapshot->g2h, p);
+ drm_printf(p, "\tg2h outstanding: %d\n",
+ snapshot->g2h_outstanding);
+
+ if (snapshot->ctb)
+ xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
+ } else {
+ drm_puts(p, "CT disabled\n");
+ }
+}
+
+/**
+ * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
+ * @snapshot: GuC CT snapshot object.
+ *
+ * This function frees all the memory that was allocated at capture
+ * time.
+ */
+void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ kfree(snapshot->ctb);
+ kfree(snapshot);
+}
+
+/**
+ * xe_guc_ct_print - GuC CT Print.
+ * @ct: GuC CT.
+ * @p: drm_printer where it will be printed out.
+ * @want_ctb: Should the full CTB content be dumped (vs just the headers)
+ *
+ * This function will quickly capture a snapshot of the CT state
+ * and immediately print it out.
+ */
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
+{
+ struct xe_guc_ct_snapshot *snapshot;
+
+ snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
+ xe_guc_ct_snapshot_print(snapshot, p);
+ xe_guc_ct_snapshot_free(snapshot);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
+{
+ struct xe_guc_log_snapshot *snapshot_log;
+ struct xe_guc_ct_snapshot *snapshot_ct;
+ struct xe_guc *guc = ct_to_guc(ct);
+ unsigned long flags;
+ bool have_capture;
+
+ if (ctb)
+ ctb->info.broken = true;
+
+ /* Ignore further errors after the first dump until a reset */
+ if (ct->dead.reported)
+ return;
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ /* And only capture one dump at a time */
+ have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
+ ct->dead.reason |= (1 << reason_code) |
+ (1 << CT_DEAD_STATE_CAPTURE);
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ if (have_capture)
+ return;
+
+ snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
+ snapshot_ct = xe_guc_ct_snapshot_capture((ct));
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
+ xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
+ xe_guc_log_snapshot_free(snapshot_log);
+ xe_guc_ct_snapshot_free(snapshot_ct);
+ } else {
+ ct->dead.snapshot_log = snapshot_log;
+ ct->dead.snapshot_ct = snapshot_ct;
+ }
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ queue_work(system_unbound_wq, &(ct)->dead.worker);
+}
+
+static void ct_dead_print(struct xe_dead_ct *dead)
+{
+ struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ static int g_count;
+ struct drm_printer ip = xe_gt_info_printer(gt);
+ struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+ if (!dead->reason) {
+ xe_gt_err(gt, "CTB is dead for no reason!?\n");
+ return;
+ }
+
+ drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
+
+ /* Can't generate a genuine core dump at this point, so just do the good bits */
+ drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ xe_device_snapshot_print(xe, &lp);
+
+ drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
+ drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
+
+ drm_puts(&lp, "**** GuC Log ****\n");
+ xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
+
+ drm_puts(&lp, "**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
+
+ drm_puts(&lp, "Done.\n");
+}
+
+static void ct_dead_worker_func(struct work_struct *w)
+{
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
+
+ if (!ct->dead.reported) {
+ ct->dead.reported = true;
+ ct_dead_print(&ct->dead);
+ }
+
+ spin_lock_irq(&ct->dead.lock);
+
+ xe_guc_log_snapshot_free(ct->dead.snapshot_log);
+ ct->dead.snapshot_log = NULL;
+ xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
+ ct->dead.snapshot_ct = NULL;
+
+ if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
+ /* A reset has occurred so re-arm the error reporting */
+ ct->dead.reason = 0;
+ ct->dead.reported = false;
+ }
+
+ spin_unlock_irq(&ct->dead.lock);
+}
+#endif
diff --git a/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/postimage.1 b/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/postimage.1
new file mode 100644
index 000000000000..b8d832c8f907
--- /dev/null
+++ b/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/postimage.1
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_sriov.h"
+
+static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
+{
+ u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
+ int num_slices = hweight32(CCS_MASK(gt));
+ struct xe_device *xe = gt_to_xe(gt);
+ int width, cslice = 0;
+ u32 config = 0;
+
+ xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
+
+ xe_assert(xe, num_engines && num_engines <= num_slices);
+ xe_assert(xe, !(num_slices % num_engines));
+
+ /*
+ * Loop over all available slices and assign each a user engine.
+ * For example, if there are four compute slices available, the
+ * assignment of compute slices to compute engines would be,
+ *
+ * With 1 engine (ccs0):
+ * slice 0, 1, 2, 3: ccs0
+ *
+ * With 2 engines (ccs0, ccs1):
+ * slice 0, 2: ccs0
+ * slice 1, 3: ccs1
+ *
+ * With 4 engines (ccs0, ccs1, ccs2, ccs3):
+ * slice 0: ccs0
+ * slice 1: ccs1
+ * slice 2: ccs2
+ * slice 3: ccs3
+ */
+ for (width = num_slices / num_engines; width; width--) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
+ continue;
+
+ if (hwe->logical_instance >= num_engines)
+ break;
+
+ config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
+
+ /* If a slice is fused off, leave disabled */
+ while ((CCS_MASK(gt) & BIT(cslice)) == 0)
+ cslice++;
+
+ mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
+ mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
+ cslice++;
+ }
+ }
+
+ /*
+ * Mask bits need to be set for the register. Though only Xe2+
+ * platforms require setting of mask bits, it won't harm for older
+ * platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(gt, CCS_MODE, mode);
+
+ xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
+ mode, config, num_engines, num_slices);
+}
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt)
+{
+ if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
+ __xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
+}
+
+static ssize_t
+num_cslices_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
+}
+
+static DEVICE_ATTR_RO(num_cslices);
+
+static ssize_t
+ccs_mode_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", gt->ccs_mode);
+}
+
+static ssize_t
+ccs_mode_store(struct device *kdev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 num_engines, num_slices;
+ int ret;
+
+ if (IS_SRIOV(xe)) {
+ xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtou32(buff, 0, &num_engines);
+ if (ret)
+ return ret;
+
+ /*
+	 * Ensure the number of engines specified is valid and that the number
+	 * of slices is an exact multiple of the number of engines.
+ */
+ num_slices = hweight32(CCS_MASK(gt));
+ if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
+ xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
+ num_engines, num_slices);
+ return -EINVAL;
+ }
+
+ /* CCS mode can only be updated when there are no drm clients */
+ spin_lock(&xe->clients.lock);
+ if (xe->clients.count) {
+ spin_unlock(&xe->clients.lock);
+ return -EBUSY;
+ }
+
+ if (gt->ccs_mode != num_engines) {
+ xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
+ gt->ccs_mode = num_engines;
+ xe_gt_record_user_engines(gt);
+ xe_gt_reset_async(gt);
+ }
+
+ spin_unlock(&xe->clients.lock);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ccs_mode);
+
+static const struct attribute *gt_ccs_mode_attrs[] = {
+ &dev_attr_ccs_mode.attr,
+ &dev_attr_num_cslices.attr,
+ NULL,
+};
+
+static void xe_gt_ccs_mode_sysfs_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
+}
+
+/**
+ * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
+ * @gt: GT structure
+ *
+ * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
+ * number of compute hardware engines to which the available compute slices
+ * are to be allocated. This user configuration change triggers a gt reset
+ * and it is expected that there are no open drm clients while doing so.
+ * The number of available compute slices is exposed to the user through a
+ * per-gt 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (!xe_gt_ccs_mode_enabled(gt))
+ return 0;
+
+ err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
+}
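
The slice-assignment loop in __xe_gt_apply_ccs_mode() above walks the compute fuse mask in round-robin order. The following is a minimal standalone sketch, not part of the driver sources: the fuse mask and engine count are made-up example inputs, and the real CCS_MODE_CSLICE() register encoding is deliberately left out, so only the assignment ordering mirrors the kernel loop.

#include <stdio.h>

int main(void)
{
	unsigned int ccs_fuse_mask = 0x1b;	/* example: slice 2 fused off */
	unsigned int num_slices = __builtin_popcount(ccs_fuse_mask);
	unsigned int num_engines = 2;		/* example ccs_mode request */
	unsigned int cslice = 0;

	for (unsigned int width = num_slices / num_engines; width; width--) {
		for (unsigned int engine = 0; engine < num_engines; engine++) {
			/* Skip slices that are fused off, as the kernel loop does */
			while (!(ccs_fuse_mask & (1u << cslice)))
				cslice++;

			printf("cslice %u -> ccs%u\n", cslice, engine);
			cslice++;
		}
	}

	return 0;
}

Running it prints cslice 0 and 3 mapped to ccs0 and cslice 1 and 4 mapped to ccs1, matching the two-engine example in the kernel comment, with the fused-off slice skipped.
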
diff --git a/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage b/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage
new file mode 100644
index 000000000000..7471e5e827fb
--- /dev/null
+++ b/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_sriov.h"
+
+static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
+{
+ u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
+ int num_slices = hweight32(CCS_MASK(gt));
+ struct xe_device *xe = gt_to_xe(gt);
+ int width, cslice = 0;
+ u32 config = 0;
+
+ xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
+
+ xe_assert(xe, num_engines && num_engines <= num_slices);
+ xe_assert(xe, !(num_slices % num_engines));
+
+ /*
+ * Loop over all available slices and assign each a user engine.
+ * For example, if there are four compute slices available, the
+ * assignment of compute slices to compute engines would be,
+ *
+ * With 1 engine (ccs0):
+ * slice 0, 1, 2, 3: ccs0
+ *
+ * With 2 engines (ccs0, ccs1):
+ * slice 0, 2: ccs0
+ * slice 1, 3: ccs1
+ *
+ * With 4 engines (ccs0, ccs1, ccs2, ccs3):
+ * slice 0: ccs0
+ * slice 1: ccs1
+ * slice 2: ccs2
+ * slice 3: ccs3
+ */
+ for (width = num_slices / num_engines; width; width--) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
+ continue;
+
+ if (hwe->logical_instance >= num_engines)
+ break;
+
+ config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
+
+ /* If a slice is fused off, leave disabled */
+ while ((CCS_MASK(gt) & BIT(cslice)) == 0)
+ cslice++;
+
+ mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
+ mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
+ cslice++;
+ }
+ }
+
+<<<<<<<
+ /*
+ * Mask bits need to be set for the register. Though only Xe2+
+ * platforms require setting of mask bits, it won't harm for older
+ * platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
+=======
+ xe_mmio_write32(gt, CCS_MODE, mode);
+>>>>>>>
+
+ xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
+ mode, config, num_engines, num_slices);
+}
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt)
+{
+ if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
+ __xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
+}
+
+static ssize_t
+num_cslices_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
+}
+
+static DEVICE_ATTR_RO(num_cslices);
+
+static ssize_t
+ccs_mode_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", gt->ccs_mode);
+}
+
+static ssize_t
+ccs_mode_store(struct device *kdev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 num_engines, num_slices;
+ int ret;
+
+ if (IS_SRIOV(xe)) {
+ xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtou32(buff, 0, &num_engines);
+ if (ret)
+ return ret;
+
+ /*
+	 * Ensure the number of engines specified is valid and that the number
+	 * of slices is an exact multiple of the number of engines.
+ */
+ num_slices = hweight32(CCS_MASK(gt));
+ if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
+ xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
+ num_engines, num_slices);
+ return -EINVAL;
+ }
+
+ /* CCS mode can only be updated when there are no drm clients */
+ spin_lock(&xe->clients.lock);
+ if (xe->clients.count) {
+ spin_unlock(&xe->clients.lock);
+ return -EBUSY;
+ }
+
+ if (gt->ccs_mode != num_engines) {
+ xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
+ gt->ccs_mode = num_engines;
+ xe_gt_record_user_engines(gt);
+ xe_gt_reset_async(gt);
+ }
+
+ spin_unlock(&xe->clients.lock);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ccs_mode);
+
+static const struct attribute *gt_ccs_mode_attrs[] = {
+ &dev_attr_ccs_mode.attr,
+ &dev_attr_num_cslices.attr,
+ NULL,
+};
+
+static void xe_gt_ccs_mode_sysfs_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
+}
+
+/**
+ * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
+ * @gt: GT structure
+ *
+ * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
+ * number of compute hardware engines to which the available compute slices
+ * are to be allocated. This user configuration change triggers a gt reset
+ * and it is expected that there are no open drm clients while doing so.
+ * The number of available compute slices is exposed to the user through a
+ * per-gt 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (!xe_gt_ccs_mode_enabled(gt))
+ return 0;
+
+ err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
+}
diff --git a/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage.1 b/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage.1
new file mode 100644
index 000000000000..7471e5e827fb
--- /dev/null
+++ b/rr-cache/97de4c70771cbdbbc8353a02ad5c622f2ba45eaf/preimage.1
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_sriov.h"
+
+static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
+{
+ u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
+ int num_slices = hweight32(CCS_MASK(gt));
+ struct xe_device *xe = gt_to_xe(gt);
+ int width, cslice = 0;
+ u32 config = 0;
+
+ xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
+
+ xe_assert(xe, num_engines && num_engines <= num_slices);
+ xe_assert(xe, !(num_slices % num_engines));
+
+ /*
+ * Loop over all available slices and assign each a user engine.
+ * For example, if there are four compute slices available, the
+ * assignment of compute slices to compute engines would be,
+ *
+ * With 1 engine (ccs0):
+ * slice 0, 1, 2, 3: ccs0
+ *
+ * With 2 engines (ccs0, ccs1):
+ * slice 0, 2: ccs0
+ * slice 1, 3: ccs1
+ *
+ * With 4 engines (ccs0, ccs1, ccs2, ccs3):
+ * slice 0: ccs0
+ * slice 1: ccs1
+ * slice 2: ccs2
+ * slice 3: ccs3
+ */
+ for (width = num_slices / num_engines; width; width--) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
+ continue;
+
+ if (hwe->logical_instance >= num_engines)
+ break;
+
+ config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
+
+ /* If a slice is fused off, leave disabled */
+ while ((CCS_MASK(gt) & BIT(cslice)) == 0)
+ cslice++;
+
+ mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
+ mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
+ cslice++;
+ }
+ }
+
+<<<<<<<
+ /*
+ * Mask bits need to be set for the register. Though only Xe2+
+ * platforms require setting of mask bits, it won't harm for older
+ * platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
+=======
+ xe_mmio_write32(gt, CCS_MODE, mode);
+>>>>>>>
+
+ xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
+ mode, config, num_engines, num_slices);
+}
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt)
+{
+ if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
+ __xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
+}
+
+static ssize_t
+num_cslices_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
+}
+
+static DEVICE_ATTR_RO(num_cslices);
+
+static ssize_t
+ccs_mode_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", gt->ccs_mode);
+}
+
+static ssize_t
+ccs_mode_store(struct device *kdev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 num_engines, num_slices;
+ int ret;
+
+ if (IS_SRIOV(xe)) {
+ xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtou32(buff, 0, &num_engines);
+ if (ret)
+ return ret;
+
+ /*
+	 * Ensure the number of engines specified is valid and that the number
+	 * of slices is an exact multiple of the number of engines.
+ */
+ num_slices = hweight32(CCS_MASK(gt));
+ if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
+ xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
+ num_engines, num_slices);
+ return -EINVAL;
+ }
+
+ /* CCS mode can only be updated when there are no drm clients */
+ spin_lock(&xe->clients.lock);
+ if (xe->clients.count) {
+ spin_unlock(&xe->clients.lock);
+ return -EBUSY;
+ }
+
+ if (gt->ccs_mode != num_engines) {
+ xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
+ gt->ccs_mode = num_engines;
+ xe_gt_record_user_engines(gt);
+ xe_gt_reset_async(gt);
+ }
+
+ spin_unlock(&xe->clients.lock);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ccs_mode);
+
+static const struct attribute *gt_ccs_mode_attrs[] = {
+ &dev_attr_ccs_mode.attr,
+ &dev_attr_num_cslices.attr,
+ NULL,
+};
+
+static void xe_gt_ccs_mode_sysfs_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
+}
+
+/**
+ * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
+ * @gt: GT structure
+ *
+ * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
+ * number of compute hardware engines to which the available compute slices
+ * are to be allocated. This user configuration change triggers a gt reset
+ * and it is expected that there are no open drm clients while doing so.
+ * The number of available compute slices is exposed to the user through a
+ * per-gt 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (!xe_gt_ccs_mode_enabled(gt))
+ return 0;
+
+ err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
+}
diff --git a/rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/postimage b/rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/postimage
new file mode 100644
index 000000000000..b6adfb9f2030
--- /dev/null
+++ b/rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/postimage
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_sriov.h"
+
+static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
+{
+ u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
+ int num_slices = hweight32(CCS_MASK(gt));
+ struct xe_device *xe = gt_to_xe(gt);
+ int width, cslice = 0;
+ u32 config = 0;
+
+ xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
+
+ xe_assert(xe, num_engines && num_engines <= num_slices);
+ xe_assert(xe, !(num_slices % num_engines));
+
+ /*
+ * Loop over all available slices and assign each a user engine.
+ * For example, if there are four compute slices available, the
+ * assignment of compute slices to compute engines would be,
+ *
+ * With 1 engine (ccs0):
+ * slice 0, 1, 2, 3: ccs0
+ *
+ * With 2 engines (ccs0, ccs1):
+ * slice 0, 2: ccs0
+ * slice 1, 3: ccs1
+ *
+ * With 4 engines (ccs0, ccs1, ccs2, ccs3):
+ * slice 0: ccs0
+ * slice 1: ccs1
+ * slice 2: ccs2
+ * slice 3: ccs3
+ */
+ for (width = num_slices / num_engines; width; width--) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
+ continue;
+
+ if (hwe->logical_instance >= num_engines)
+ break;
+
+ config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
+
+ /* If a slice is fused off, leave disabled */
+ while ((CCS_MASK(gt) & BIT(cslice)) == 0)
+ cslice++;
+
+ mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
+ mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
+ cslice++;
+ }
+ }
+
+ /*
+ * Mask bits need to be set for the register. Though only Xe2+
+ * platforms require setting of mask bits, it won't harm for older
+ * platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
+
+ xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
+ mode, config, num_engines, num_slices);
+}
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt)
+{
+ if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
+ __xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
+}
+
+static ssize_t
+num_cslices_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
+}
+
+static DEVICE_ATTR_RO(num_cslices);
+
+static ssize_t
+ccs_mode_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", gt->ccs_mode);
+}
+
+static ssize_t
+ccs_mode_store(struct device *kdev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 num_engines, num_slices;
+ int ret;
+
+ if (IS_SRIOV(xe)) {
+ xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtou32(buff, 0, &num_engines);
+ if (ret)
+ return ret;
+
+ /*
+	 * Ensure the number of engines specified is valid and that the number
+	 * of slices is an exact multiple of the number of engines.
+ */
+ num_slices = hweight32(CCS_MASK(gt));
+ if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
+ xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
+ num_engines, num_slices);
+ return -EINVAL;
+ }
+
+ /* CCS mode can only be updated when there are no drm clients */
+ mutex_lock(&xe->drm.filelist_mutex);
+ if (!list_empty(&xe->drm.filelist)) {
+ mutex_unlock(&xe->drm.filelist_mutex);
+ xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
+ return -EBUSY;
+ }
+
+ if (gt->ccs_mode != num_engines) {
+ xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
+ gt->ccs_mode = num_engines;
+ xe_gt_record_user_engines(gt);
+ xe_gt_reset_async(gt);
+ }
+
+ mutex_unlock(&xe->drm.filelist_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ccs_mode);
+
+static const struct attribute *gt_ccs_mode_attrs[] = {
+ &dev_attr_ccs_mode.attr,
+ &dev_attr_num_cslices.attr,
+ NULL,
+};
+
+static void xe_gt_ccs_mode_sysfs_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
+}
+
+/**
+ * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
+ * @gt: GT structure
+ *
+ * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
+ * number of compute hardware engines to which the available compute slices
+ * are to be allocated. This user configuration change triggers a gt reset
+ * and it is expected that there are no open drm clients while doing so.
+ * The number of available compute slices is exposed to the user through a
+ * per-gt 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (!xe_gt_ccs_mode_enabled(gt))
+ return 0;
+
+ err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
+}
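
Since the attributes registered above are plain sysfs files, exercising them from userspace needs nothing more than open/read/write. Below is a hedged sketch: the GT sysfs path is an assumed example (the real location depends on the card, tile and GT indices), and writing ccs_mode is rejected with -EBUSY while any DRM client is open, as ccs_mode_store() above enforces.

#include <stdio.h>

#define GT_SYSFS "/sys/class/drm/card0/device/tile0/gt0"	/* assumed example path */

static int read_attr(const char *name, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", GT_SYSFS, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f))
		buf[0] = '\0';
	fclose(f);
	return 0;
}

int main(void)
{
	char val[32];

	/* How many compute slices does this GT expose? */
	if (read_attr("num_cslices", val, sizeof(val)) == 0)
		printf("num_cslices: %s", val);

	/* Current number of user-visible compute engines. */
	if (read_attr("ccs_mode", val, sizeof(val)) == 0)
		printf("ccs_mode: %s", val);

	/*
	 * To change the mode, write the desired engine count back
	 * (e.g. "echo 2 > ccs_mode"); this triggers a GT reset and fails
	 * with -EBUSY while DRM clients are open (see ccs_mode_store()).
	 */
	return 0;
}
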
diff --git a/rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/preimage b/rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/preimage
new file mode 100644
index 000000000000..04388b3dc6f8
--- /dev/null
+++ b/rr-cache/e35568f63739f4426e51da89122f3dad8f90ac33/preimage
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_sriov.h"
+
+static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
+{
+ u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
+ int num_slices = hweight32(CCS_MASK(gt));
+ struct xe_device *xe = gt_to_xe(gt);
+ int width, cslice = 0;
+ u32 config = 0;
+
+ xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
+
+ xe_assert(xe, num_engines && num_engines <= num_slices);
+ xe_assert(xe, !(num_slices % num_engines));
+
+ /*
+ * Loop over all available slices and assign each a user engine.
+ * For example, if there are four compute slices available, the
+ * assignment of compute slices to compute engines would be,
+ *
+ * With 1 engine (ccs0):
+ * slice 0, 1, 2, 3: ccs0
+ *
+ * With 2 engines (ccs0, ccs1):
+ * slice 0, 2: ccs0
+ * slice 1, 3: ccs1
+ *
+ * With 4 engines (ccs0, ccs1, ccs2, ccs3):
+ * slice 0: ccs0
+ * slice 1: ccs1
+ * slice 2: ccs2
+ * slice 3: ccs3
+ */
+ for (width = num_slices / num_engines; width; width--) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
+ continue;
+
+ if (hwe->logical_instance >= num_engines)
+ break;
+
+ config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
+
+ /* If a slice is fused off, leave disabled */
+ while ((CCS_MASK(gt) & BIT(cslice)) == 0)
+ cslice++;
+
+ mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
+ mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
+ cslice++;
+ }
+ }
+
+<<<<<<<
+ /*
+ * Mask bits need to be set for the register. Though only Xe2+
+ * platforms require setting of mask bits, it won't harm for older
+ * platforms as these bits are unused there.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(gt, CCS_MODE, mode);
+=======
+ xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
+>>>>>>>
+
+ xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
+ mode, config, num_engines, num_slices);
+}
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt)
+{
+ if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
+ __xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
+}
+
+static ssize_t
+num_cslices_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
+}
+
+static DEVICE_ATTR_RO(num_cslices);
+
+static ssize_t
+ccs_mode_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", gt->ccs_mode);
+}
+
+static ssize_t
+ccs_mode_store(struct device *kdev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 num_engines, num_slices;
+ int ret;
+
+ if (IS_SRIOV(xe)) {
+ xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtou32(buff, 0, &num_engines);
+ if (ret)
+ return ret;
+
+ /*
+	 * Ensure the number of engines specified is valid and that the number
+	 * of slices is an exact multiple of the number of engines.
+ */
+ num_slices = hweight32(CCS_MASK(gt));
+ if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
+ xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
+ num_engines, num_slices);
+ return -EINVAL;
+ }
+
+ /* CCS mode can only be updated when there are no drm clients */
+ mutex_lock(&xe->drm.filelist_mutex);
+ if (!list_empty(&xe->drm.filelist)) {
+ mutex_unlock(&xe->drm.filelist_mutex);
+ xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
+ return -EBUSY;
+ }
+
+ if (gt->ccs_mode != num_engines) {
+ xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
+ gt->ccs_mode = num_engines;
+ xe_gt_record_user_engines(gt);
+ xe_gt_reset_async(gt);
+ }
+
+ mutex_unlock(&xe->drm.filelist_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ccs_mode);
+
+static const struct attribute *gt_ccs_mode_attrs[] = {
+ &dev_attr_ccs_mode.attr,
+ &dev_attr_num_cslices.attr,
+ NULL,
+};
+
+static void xe_gt_ccs_mode_sysfs_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
+}
+
+/**
+ * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
+ * @gt: GT structure
+ *
+ * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
+ * number of compute hardware engines to which the available compute slices
+ * are to be allocated. This user configuration change triggers a gt reset
+ * and it is expected that there are no open drm clients while doing so.
+ * The number of available compute slices is exposed to the user through a
+ * per-gt 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (!xe_gt_ccs_mode_enabled(gt))
+ return 0;
+
+ err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
+}
diff --git a/rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/postimage b/rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/postimage
new file mode 100644
index 000000000000..bd476297ed0a
--- /dev/null
+++ b/rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/postimage
@@ -0,0 +1,1355 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "gt/intel_engine.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
+#include "i915_drv.h"
+#include "i915_pmu.h"
+
+/* Frequency for the sampling timer for events which need it. */
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
+
+#define ENGINE_SAMPLE_MASK \
+ (BIT(I915_SAMPLE_BUSY) | \
+ BIT(I915_SAMPLE_WAIT) | \
+ BIT(I915_SAMPLE_SEMA))
+
+static cpumask_t i915_pmu_cpumask;
+static unsigned int i915_pmu_target_cpu = -1;
+
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+ return container_of(pmu, struct drm_i915_private, pmu);
+}
+
+static u8 engine_config_sample(u64 config)
+{
+ return config & I915_PMU_SAMPLE_MASK;
+}
+
+static u8 engine_event_sample(struct perf_event *event)
+{
+ return engine_config_sample(event->attr.config);
+}
+
+static u8 engine_event_class(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
+}
+
+static u8 engine_event_instance(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
+}
+
+static bool is_engine_config(const u64 config)
+{
+ return config < __I915_PMU_OTHER(0);
+}
+
+static unsigned int config_gt_id(const u64 config)
+{
+ return config >> __I915_PMU_GT_SHIFT;
+}
+
+static u64 config_counter(const u64 config)
+{
+ return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
+}
+
+static unsigned int other_bit(const u64 config)
+{
+ unsigned int val;
+
+ switch (config_counter(config)) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+ break;
+ default:
+ /*
+		 * Events that do not require sampling, or tracking state
+		 * transitions between enabled and disabled, can be ignored.
+ */
+ return -1;
+ }
+
+ return I915_ENGINE_SAMPLE_COUNT +
+ config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
+ val;
+}
+
+static unsigned int config_bit(const u64 config)
+{
+ if (is_engine_config(config))
+ return engine_config_sample(config);
+ else
+ return other_bit(config);
+}
+
+static u32 config_mask(const u64 config)
+{
+ unsigned int bit = config_bit(config);
+
+ if (__builtin_constant_p(config))
+ BUILD_BUG_ON(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+ else
+ WARN_ON_ONCE(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+
+ return BIT(config_bit(config));
+}
+
+static bool is_engine_event(struct perf_event *event)
+{
+ return is_engine_config(event->attr.config);
+}
+
+static unsigned int event_bit(struct perf_event *event)
+{
+ return config_bit(event->attr.config);
+}
+
+static u32 frequency_enabled_mask(void)
+{
+ unsigned int i;
+ u32 mask = 0;
+
+ for (i = 0; i < I915_PMU_MAX_GT; i++)
+ mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));
+
+ return mask;
+}
+
+static bool pmu_needs_timer(struct i915_pmu *pmu)
+{
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ u32 enable;
+
+ /*
+ * Only some counters need the sampling timer.
+ *
+ * We start with a bitmask of all currently enabled events.
+ */
+ enable = pmu->enable;
+
+ /*
+ * Mask out all the ones which do not need the timer, or in
+ * other words keep all the ones that could need the timer.
+ */
+ enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;
+
+ /*
+	 * Also, when software busyness tracking is available we do not need
+	 * the timer for the I915_SAMPLE_BUSY counter.
+ */
+ if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
+ enable &= ~BIT(I915_SAMPLE_BUSY);
+
+ /*
+ * If some bits remain it means we need the sampling timer running.
+ */
+ return enable;
+}
+
+static u64 __get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ u64 val;
+
+ val = intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6pp);
+
+ return val;
+}
+
+static inline s64 ktime_since_raw(const ktime_t kt)
+{
+ return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
+}
+
+static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
+{
+ return pmu->sample[gt_id][sample].cur;
+}
+
+static void
+store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
+{
+ pmu->sample[gt_id][sample].cur = val;
+}
+
+static void
+add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
+{
+ pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
+}
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
+ struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
+ unsigned long flags;
+ u64 val;
+
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
+ val = __get_rc6(gt);
+ intel_gt_pm_put_async(gt, wakeref);
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (wakeref) {
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
+ } else {
+ /*
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ val = ktime_since_raw(pmu->sleep_last[gt_id]);
+ val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
+ }
+
+ if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
+ val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
+ else
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return val;
+}
+
+static void init_rc6(struct i915_pmu *pmu)
+{
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ u64 val = __get_rc6(gt);
+
+ store_sample(pmu, i, __I915_SAMPLE_RC6, val);
+ store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
+ val);
+ pmu->sleep_last[i] = ktime_get_raw();
+ }
+ }
+}
+
+static void park_rc6(struct intel_gt *gt)
+{
+ struct i915_pmu *pmu = &gt->i915->pmu;
+
+ store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
+ pmu->sleep_last[gt->info.id] = ktime_get_raw();
+}
+
+static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
+{
+ if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
+ pmu->timer_enabled = true;
+ pmu->timer_last = ktime_get();
+ hrtimer_start_range_ns(&pmu->timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+void i915_pmu_gt_parked(struct intel_gt *gt)
+{
+ struct i915_pmu *pmu = &gt->i915->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ park_rc6(gt);
+
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ pmu->unparked &= ~BIT(gt->info.id);
+ if (pmu->unparked == 0)
+ pmu->timer_enabled = false;
+
+ spin_unlock_irq(&pmu->lock);
+}
+
+void i915_pmu_gt_unparked(struct intel_gt *gt)
+{
+ struct i915_pmu *pmu = &gt->i915->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ /*
+ * Re-enable sampling timer when GPU goes active.
+ */
+ if (pmu->unparked == 0)
+ __i915_pmu_maybe_start_timer(pmu);
+
+ pmu->unparked |= BIT(gt->info.id);
+
+ spin_unlock_irq(&pmu->lock);
+}
+
+static void
+add_sample(struct i915_pmu_sample *sample, u32 val)
+{
+ sample->cur += val;
+}
+
+static bool exclusive_mmio_access(const struct drm_i915_private *i915)
+{
+ /*
+ * We have to avoid concurrent mmio cache line access on gen7 or
+ * risk a machine hang. For a fun history lesson dig out the old
+ * userspace intel_gpu_top and run it on Ivybridge or Haswell!
+ */
+ return GRAPHICS_VER(i915) == 7;
+}
+
+static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ bool busy;
+ u32 val;
+
+ val = ENGINE_READ_FW(engine, RING_CTL);
+ if (val == 0) /* powerwell off => engine idle */
+ return;
+
+ if (val & RING_WAIT)
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ return;
+
+ /*
+ * While waiting on a semaphore or event, MI_MODE reports the
+ * ring as idle. However, previously using the seqno, and with
+ * execlists sampling, we account for the ring waiting as the
+ * engine being busy. Therefore, we record the sample as being
+ * busy if either waiting or !idle.
+ */
+ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+ if (!busy) {
+ val = ENGINE_READ_FW(engine, RING_MI_MODE);
+ busy = !(val & MODE_IDLE);
+ }
+ if (busy)
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ u32 tail, head, acthd;
+
+ tail = ENGINE_READ_FW(engine, RING_TAIL);
+ head = ENGINE_READ_FW(engine, RING_HEAD);
+ acthd = ENGINE_READ_FW(engine, ACTHD);
+
+ if (head & HEAD_WAIT_I8XX)
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+
+ if (head & HEAD_WAIT_I8XX || head != acthd ||
+ (head & HEAD_ADDR) != (tail & TAIL_ADDR))
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ if (GRAPHICS_VER(engine->i915) >= 3)
+ gen3_engine_sample(engine, period_ns);
+ else
+ gen2_engine_sample(engine, period_ns);
+}
+
+static void
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ return;
+
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ for_each_engine(engine, gt, id) {
+ if (!engine->pmu.enable)
+ continue;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ continue;
+
+ if (exclusive_mmio_access(i915)) {
+ spin_lock_irqsave(&engine->uncore->lock, flags);
+ engine_sample(engine, period_ns);
+ spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ } else {
+ engine_sample(engine, period_ns);
+ }
+
+ intel_engine_pm_put_async(engine);
+ }
+}
+
+static bool
+frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
+{
+ return pmu->enable &
+ (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
+}
+
+static void
+frequency_sample(struct intel_gt *gt, unsigned int period_ns)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
+ struct i915_pmu *pmu = &i915->pmu;
+ struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
+
+ if (!frequency_sampling_enabled(pmu, gt_id))
+ return;
+
+ /* Report 0/0 (actual/requested) frequency while parked. */
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!wakeref)
+ return;
+
+ if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
+ u32 val;
+
+ /*
+ * We take a quick peek here without using forcewake
+ * so that we don't perturb the system under observation
+ * (forcewake => !rc6 => increased power use). We expect
+ * that if the read fails because it is outside of the
+ * mmio power well, then it will return 0 -- in which
+ * case we assume the system is running at the intended
+ * frequency. Fortunately, the read should rarely fail!
+ */
+ val = intel_rps_read_actual_frequency_fw(rps);
+ if (!val)
+ val = intel_gpu_freq(rps, rps->cur_freq);
+
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
+ val, period_ns / 1000);
+ }
+
+ if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
+ intel_rps_get_requested_frequency(rps),
+ period_ns / 1000);
+ }
+
+ intel_gt_pm_put_async(gt, wakeref);
+}
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+ struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ unsigned int period_ns;
+ struct intel_gt *gt;
+ unsigned int i;
+ ktime_t now;
+
+ if (!READ_ONCE(pmu->timer_enabled))
+ return HRTIMER_NORESTART;
+
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
+ pmu->timer_last = now;
+
+ /*
+	 * Strictly speaking the passed in period may not be 100% accurate for
+	 * all internal calculations, since some amount of time can be spent on
+	 * grabbing the forcewake. However, the potential error from timer
+	 * callback delay greatly dominates this, so we keep it simple.
+ */
+
+ for_each_gt(gt, i915, i) {
+ if (!(pmu->unparked & BIT(i)))
+ continue;
+
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
+ }
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
+
+ return HRTIMER_RESTART;
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+
+ drm_WARN_ON(&i915->drm, event->parent);
+
+ drm_dev_put(&i915->drm);
+}
+
+static int
+engine_event_status(struct intel_engine_cs *engine,
+ enum drm_i915_pmu_engine_sample sample)
+{
+ switch (sample) {
+ case I915_SAMPLE_BUSY:
+ case I915_SAMPLE_WAIT:
+ break;
+ case I915_SAMPLE_SEMA:
+ if (GRAPHICS_VER(engine->i915) < 6)
+ return -ENODEV;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int
+config_status(struct drm_i915_private *i915, u64 config)
+{
+ struct intel_gt *gt = to_gt(i915);
+
+ unsigned int gt_id = config_gt_id(config);
+ unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;
+
+ if (gt_id > max_gt_id)
+ return -ENOENT;
+
+ switch (config_counter(config)) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ /* Requires a mutex for sampling! */
+ return -ENODEV;
+ fallthrough;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ if (GRAPHICS_VER(i915) < 6)
+ return -ENODEV;
+ break;
+ case I915_PMU_INTERRUPTS:
+ if (gt_id)
+ return -ENOENT;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ if (!gt->rc6.supported)
+ return -ENODEV;
+ break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int engine_event_init(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event));
+ if (!engine)
+ return -ENODEV;
+
+ return engine_event_status(engine, engine_event_sample(event));
+}
+
+static int i915_pmu_event_init(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ int ret;
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* only allow running on one cpu at a time */
+ if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
+ return -EINVAL;
+
+ if (is_engine_event(event))
+ ret = engine_event_init(event);
+ else
+ ret = config_status(i915, event->attr.config);
+ if (ret)
+ return ret;
+
+ if (!event->parent) {
+ drm_dev_get(&i915->drm);
+ event->destroy = i915_pmu_event_destroy;
+ }
+
+ return 0;
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ u64 val = 0;
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
+ /* Do nothing */
+ } else if (sample == I915_SAMPLE_BUSY &&
+ intel_engine_supports_stats(engine)) {
+ ktime_t unused;
+
+ val = ktime_to_ns(intel_engine_get_busy_time(engine,
+ &unused));
+ } else {
+ val = engine->pmu.sample[sample].cur;
+ }
+ } else {
+ const unsigned int gt_id = config_gt_id(event->attr.config);
+ const u64 config = config_counter(event->attr.config);
+
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val =
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_ACT),
+ USEC_PER_SEC /* to MHz */);
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val =
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_REQ),
+ USEC_PER_SEC /* to MHz */);
+ break;
+ case I915_PMU_INTERRUPTS:
+ val = READ_ONCE(pmu->irq_count);
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = get_rc6(i915->gt[gt_id]);
+ break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
+ break;
+ }
+ }
+
+ return val;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
+ if (!pmu->registered) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ prev = local64_read(&hwc->prev_count);
+ do {
+ new = __i915_pmu_event_read(event);
+ } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
+
+ local64_add(new - prev, &event->count);
+}
+
+static void i915_pmu_enable(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ const unsigned int bit = event_bit(event);
+ unsigned long flags;
+
+ if (bit == -1)
+ goto update;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /*
+ * Update the bitmask of enabled events and increment
+ * the event reference counter.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+
+ pmu->enable |= BIT(bit);
+ pmu->enable_count[bit]++;
+
+ /*
+ * Start the sampling timer if needed and not already enabled.
+ */
+ __i915_pmu_maybe_start_timer(pmu);
+
+ /*
+	 * For per-engine events the bitmask and reference counts
+	 * are stored per engine.
+ */
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+ I915_ENGINE_SAMPLE_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+ I915_ENGINE_SAMPLE_COUNT);
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+ engine->pmu.enable |= BIT(sample);
+ engine->pmu.enable_count[sample]++;
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+update:
+ /*
+ * Store the current counter value so we can report the correct delta
+ * for all listeners. Even when the event was already enabled and has
+ * an existing non-zero value.
+ */
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+}
+
+static void i915_pmu_disable(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ const unsigned int bit = event_bit(event);
+ unsigned long flags;
+
+ if (bit == -1)
+ return;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--engine->pmu.enable_count[sample] == 0)
+ engine->pmu.enable &= ~BIT(sample);
+ }
+
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--pmu->enable_count[bit] == 0) {
+ pmu->enable &= ~BIT(bit);
+ pmu->timer_enabled &= pmu_needs_timer(pmu);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static void i915_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+
+ if (!pmu->registered)
+ return;
+
+ i915_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void i915_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+
+ if (!pmu->registered)
+ goto out;
+
+ if (flags & PERF_EF_UPDATE)
+ i915_pmu_event_read(event);
+
+ i915_pmu_disable(event);
+
+out:
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int i915_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (flags & PERF_EF_START)
+ i915_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void i915_pmu_event_del(struct perf_event *event, int flags)
+{
+ i915_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int i915_pmu_event_event_idx(struct perf_event *event)
+{
+ return 0;
+}
+
+struct i915_str_attribute {
+ struct device_attribute attr;
+ const char *str;
+};
+
+static ssize_t i915_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_str_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_str_attribute, attr);
+ return sprintf(buf, "%s\n", eattr->str);
+}
+
+#define I915_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct i915_str_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
+ .str = _config, } \
+ })[0].attr.attr)
+
+static struct attribute *i915_pmu_format_attrs[] = {
+ I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = i915_pmu_format_attrs,
+};
+
+struct i915_ext_attribute {
+ struct device_attribute attr;
+ unsigned long val;
+};
+
+static ssize_t i915_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", eattr->val);
+}
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *i915_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_cpumask_attr_group = {
+ .attrs = i915_cpumask_attrs,
+};
+
+#define __event(__counter, __name, __unit) \
+{ \
+ .counter = (__counter), \
+ .name = (__name), \
+ .unit = (__unit), \
+ .global = false, \
+}
+
+#define __global_event(__counter, __name, __unit) \
+{ \
+ .counter = (__counter), \
+ .name = (__name), \
+ .unit = (__unit), \
+ .global = true, \
+}
+
+#define __engine_event(__sample, __name) \
+{ \
+ .sample = (__sample), \
+ .name = (__name), \
+}
+
+static struct i915_ext_attribute *
+add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
+{
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = i915_pmu_event_show;
+ attr->val = config;
+
+ return ++attr;
+}
+
+static struct perf_pmu_events_attr *
+add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
+ const char *str)
+{
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = perf_event_sysfs_show;
+ attr->event_str = str;
+
+ return ++attr;
+}
+
+static struct attribute **
+create_event_attributes(struct i915_pmu *pmu)
+{
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ static const struct {
+ unsigned int counter;
+ const char *name;
+ const char *unit;
+ bool global;
+ } events[] = {
+ __event(0, "actual-frequency", "M"),
+ __event(1, "requested-frequency", "M"),
+ __global_event(2, "interrupts", NULL),
+ __event(3, "rc6-residency", "ns"),
+ __event(4, "software-gt-awake-time", "ns"),
+ };
+ static const struct {
+ enum drm_i915_pmu_engine_sample sample;
+ char *name;
+ } engine_events[] = {
+ __engine_event(I915_SAMPLE_BUSY, "busy"),
+ __engine_event(I915_SAMPLE_SEMA, "sema"),
+ __engine_event(I915_SAMPLE_WAIT, "wait"),
+ };
+ unsigned int count = 0;
+ struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
+ struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
+ struct attribute **attr = NULL, **attr_iter;
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt;
+ unsigned int i, j;
+
+ /* Count how many counters we will be exposing. */
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+
+ if (!config_status(i915, config))
+ count++;
+ }
+ }
+
+ for_each_uabi_engine(engine, i915) {
+ for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+ if (!engine_event_status(engine,
+ engine_events[i].sample))
+ count++;
+ }
+ }
+
+ /* Allocate attribute objects and table. */
+ i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
+ if (!i915_attr)
+ goto err_alloc;
+
+ pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
+ if (!pmu_attr)
+ goto err_alloc;
+
+ /* Max one pointer of each attribute type plus a termination entry. */
+ attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ goto err_alloc;
+
+ i915_iter = i915_attr;
+ pmu_iter = pmu_attr;
+ attr_iter = attr;
+
+ /* Initialize supported non-engine counters. */
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+ char *str;
+
+ if (config_status(i915, config))
+ continue;
+
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kstrdup(events[i].name, GFP_KERNEL);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u",
+ events[i].name, j);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter = add_i915_attr(i915_iter, str, config);
+
+ if (events[i].unit) {
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kasprintf(GFP_KERNEL, "%s.unit",
+ events[i].name);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
+ events[i].name, j);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str,
+ events[i].unit);
+ }
+ }
+ }
+
+ /* Initialize supported engine counters. */
+ for_each_uabi_engine(engine, i915) {
+ for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+ char *str;
+
+ if (engine_event_status(engine,
+ engine_events[i].sample))
+ continue;
+
+ str = kasprintf(GFP_KERNEL, "%s-%s",
+ engine->name, engine_events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter =
+ add_i915_attr(i915_iter, str,
+ __I915_PMU_ENGINE(engine->uabi_class,
+ engine->uabi_instance,
+ engine_events[i].sample));
+
+ str = kasprintf(GFP_KERNEL, "%s-%s.unit",
+ engine->name, engine_events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
+ }
+ }
+
+ pmu->i915_attr = i915_attr;
+ pmu->pmu_attr = pmu_attr;
+
+ return attr;
+
+err:;
+ for (attr_iter = attr; *attr_iter; attr_iter++)
+ kfree((*attr_iter)->name);
+
+err_alloc:
+ kfree(attr);
+ kfree(i915_attr);
+ kfree(pmu_attr);
+
+ return NULL;
+}
+
+static void free_event_attributes(struct i915_pmu *pmu)
+{
+ struct attribute **attr_iter = pmu->events_attr_group.attrs;
+
+ for (; *attr_iter; attr_iter++)
+ kfree((*attr_iter)->name);
+
+ kfree(pmu->events_attr_group.attrs);
+ kfree(pmu->i915_attr);
+ kfree(pmu->pmu_attr);
+
+ pmu->events_attr_group.attrs = NULL;
+ pmu->i915_attr = NULL;
+ pmu->pmu_attr = NULL;
+}
+
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
+
+ /* Select the first online CPU as a designated reader. */
+ if (cpumask_empty(&i915_pmu_cpumask))
+ cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+
+ return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
+ unsigned int target = i915_pmu_target_cpu;
+
+ /*
+ * Unregistering an instance generates a CPU offline event which we must
+ * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
+ */
+ if (!pmu->registered)
+ return 0;
+
+ if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+ target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+
+ /* Migrate events if there is a valid target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &i915_pmu_cpumask);
+ i915_pmu_target_cpu = target;
+ }
+ }
+
+ if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
+ perf_pmu_migrate_context(&pmu->base, cpu, target);
+ pmu->cpuhp.cpu = target;
+ }
+
+ return 0;
+}
+
+static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
+
+int i915_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/x86/intel/i915:online",
+ i915_pmu_cpu_online,
+ i915_pmu_cpu_offline);
+ if (ret < 0)
+ pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
+ ret);
+ else
+ cpuhp_state = ret;
+
+ return 0;
+}
+
+void i915_pmu_exit(void)
+{
+ if (cpuhp_state != CPUHP_INVALID)
+ cpuhp_remove_multi_state(cpuhp_state);
+}
+
+static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
+{
+ if (cpuhp_state == CPUHP_INVALID)
+ return -EINVAL;
+
+ return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
+}
+
+static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
+{
+ cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
+}
+
+void i915_pmu_register(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+ const struct attribute_group *attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &pmu->events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+ };
+ int ret = -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+ pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
+
+ if (IS_DGFX(i915)) {
+ pmu->name = kasprintf(GFP_KERNEL,
+ "i915_%s",
+ dev_name(i915->drm.dev));
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
+ pmu->name = "i915";
+ }
+ if (!pmu->name)
+ goto err;
+
+ pmu->events_attr_group.name = "events";
+ pmu->events_attr_group.attrs = create_event_attributes(pmu);
+ if (!pmu->events_attr_group.attrs)
+ goto err_name;
+
+ pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+ if (!pmu->base.attr_groups)
+ goto err_attr;
+
+ pmu->base.module = THIS_MODULE;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = i915_pmu_event_init;
+ pmu->base.add = i915_pmu_event_add;
+ pmu->base.del = i915_pmu_event_del;
+ pmu->base.start = i915_pmu_event_start;
+ pmu->base.stop = i915_pmu_event_stop;
+ pmu->base.read = i915_pmu_event_read;
+ pmu->base.event_idx = i915_pmu_event_event_idx;
+
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
+ if (ret)
+ goto err_groups;
+
+ ret = i915_pmu_register_cpuhp_state(pmu);
+ if (ret)
+ goto err_unreg;
+
+ pmu->registered = true;
+
+ return;
+
+err_unreg:
+ perf_pmu_unregister(&pmu->base);
+err_groups:
+ kfree(pmu->base.attr_groups);
+err_attr:
+ free_event_attributes(pmu);
+err_name:
+ if (IS_DGFX(i915))
+ kfree(pmu->name);
+err:
+ drm_notice(&i915->drm, "Failed to register PMU!\n");
+}
+
+void i915_pmu_unregister(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ /*
+	 * "Disconnect" the PMU callbacks - since all are atomic, synchronize_rcu()
+	 * ensures all currently executing callbacks will have exited before we
+	 * proceed with unregistration.
+ */
+ pmu->registered = false;
+ synchronize_rcu();
+
+ hrtimer_cancel(&pmu->timer);
+
+ i915_pmu_unregister_cpuhp_state(pmu);
+
+ perf_pmu_unregister(&pmu->base);
+ kfree(pmu->base.attr_groups);
+ if (IS_DGFX(i915))
+ kfree(pmu->name);
+ free_event_attributes(pmu);
+}
diff --git a/rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/preimage b/rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/preimage
new file mode 100644
index 000000000000..c6563a2b5fcd
--- /dev/null
+++ b/rr-cache/eec037ad6a075a23260e0e04d4bd98a2421720bb/preimage
@@ -0,0 +1,1371 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "gt/intel_engine.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
+#include "i915_drv.h"
+#include "i915_pmu.h"
+
+/* Frequency for the sampling timer for events which need it. */
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
+
+#define ENGINE_SAMPLE_MASK \
+ (BIT(I915_SAMPLE_BUSY) | \
+ BIT(I915_SAMPLE_WAIT) | \
+ BIT(I915_SAMPLE_SEMA))
+
+static cpumask_t i915_pmu_cpumask;
+static unsigned int i915_pmu_target_cpu = -1;
+
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+ return container_of(pmu, struct drm_i915_private, pmu);
+}
+
+static u8 engine_config_sample(u64 config)
+{
+ return config & I915_PMU_SAMPLE_MASK;
+}
+
+static u8 engine_event_sample(struct perf_event *event)
+{
+ return engine_config_sample(event->attr.config);
+}
+
+static u8 engine_event_class(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
+}
+
+static u8 engine_event_instance(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
+}
+
+static bool is_engine_config(const u64 config)
+{
+ return config < __I915_PMU_OTHER(0);
+}
+
+static unsigned int config_gt_id(const u64 config)
+{
+ return config >> __I915_PMU_GT_SHIFT;
+}
+
+static u64 config_counter(const u64 config)
+{
+ return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
+}
+
+static unsigned int other_bit(const u64 config)
+{
+ unsigned int val;
+
+ switch (config_counter(config)) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+ break;
+ default:
+ /*
+		 * Events that do not require sampling, or tracking of state
+		 * transitions between enabled and disabled, can be ignored.
+ */
+ return -1;
+ }
+
+ return I915_ENGINE_SAMPLE_COUNT +
+ config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
+ val;
+}
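+
+/*
+ * Worked example (illustration only): engine samples occupy the low
+ * I915_ENGINE_SAMPLE_COUNT bits of the enable mask, and each GT then gets a
+ * block of __I915_PMU_TRACKED_EVENT_COUNT bits. With the layout above, RC6
+ * residency on GT1 maps to bit
+ * I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT +
+ * __I915_PMU_RC6_RESIDENCY_ENABLED.
+ */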
+
+static unsigned int config_bit(const u64 config)
+{
+ if (is_engine_config(config))
+ return engine_config_sample(config);
+ else
+ return other_bit(config);
+}
+
+static u32 config_mask(const u64 config)
+{
+ unsigned int bit = config_bit(config);
+
+ if (__builtin_constant_p(config))
+ BUILD_BUG_ON(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+ else
+ WARN_ON_ONCE(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+
+ return BIT(config_bit(config));
+}
+
+static bool is_engine_event(struct perf_event *event)
+{
+ return is_engine_config(event->attr.config);
+}
+
+static unsigned int event_bit(struct perf_event *event)
+{
+ return config_bit(event->attr.config);
+}
+
+static u32 frequency_enabled_mask(void)
+{
+ unsigned int i;
+ u32 mask = 0;
+
+ for (i = 0; i < I915_PMU_MAX_GT; i++)
+ mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));
+
+ return mask;
+}
+
+static bool pmu_needs_timer(struct i915_pmu *pmu)
+{
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ u32 enable;
+
+ /*
+ * Only some counters need the sampling timer.
+ *
+ * We start with a bitmask of all currently enabled events.
+ */
+ enable = pmu->enable;
+
+ /*
+ * Mask out all the ones which do not need the timer, or in
+ * other words keep all the ones that could need the timer.
+ */
+ enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;
+
+ /*
+	 * Also, when software busyness tracking is available we do not
+	 * need the timer for the I915_SAMPLE_BUSY counter.
+ */
+ if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
+ enable &= ~BIT(I915_SAMPLE_BUSY);
+
+ /*
+ * If some bits remain it means we need the sampling timer running.
+ */
+ return enable;
+}
+
+static u64 __get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ u64 val;
+
+ val = intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6pp);
+
+ return val;
+}
+
+static inline s64 ktime_since_raw(const ktime_t kt)
+{
+ return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
+}
+
+static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
+{
+ return pmu->sample[gt_id][sample].cur;
+}
+
+static void
+store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
+{
+ pmu->sample[gt_id][sample].cur = val;
+}
+
+static void
+add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
+{
+ pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
+}
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
+ struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
+ unsigned long flags;
+ u64 val;
+
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
+ val = __get_rc6(gt);
+ intel_gt_pm_put_async(gt, wakeref);
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (wakeref) {
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
+ } else {
+ /*
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ val = ktime_since_raw(pmu->sleep_last[gt_id]);
+ val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
+ }
+
+ if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
+ val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
+ else
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return val;
+}
+
+static void init_rc6(struct i915_pmu *pmu)
+{
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ u64 val = __get_rc6(gt);
+
+ store_sample(pmu, i, __I915_SAMPLE_RC6, val);
+ store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
+ val);
+ pmu->sleep_last[i] = ktime_get_raw();
+ }
+ }
+}
+
+static void park_rc6(struct intel_gt *gt)
+{
+ struct i915_pmu *pmu = &gt->i915->pmu;
+
+ store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
+ pmu->sleep_last[gt->info.id] = ktime_get_raw();
+}
+
+static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
+{
+ if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
+ pmu->timer_enabled = true;
+ pmu->timer_last = ktime_get();
+ hrtimer_start_range_ns(&pmu->timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+void i915_pmu_gt_parked(struct intel_gt *gt)
+{
+ struct i915_pmu *pmu = &gt->i915->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ park_rc6(gt);
+
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ pmu->unparked &= ~BIT(gt->info.id);
+ if (pmu->unparked == 0)
+ pmu->timer_enabled = false;
+
+ spin_unlock_irq(&pmu->lock);
+}
+
+void i915_pmu_gt_unparked(struct intel_gt *gt)
+{
+ struct i915_pmu *pmu = &gt->i915->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ /*
+ * Re-enable sampling timer when GPU goes active.
+ */
+ if (pmu->unparked == 0)
+ __i915_pmu_maybe_start_timer(pmu);
+
+ pmu->unparked |= BIT(gt->info.id);
+
+ spin_unlock_irq(&pmu->lock);
+}
+
+static void
+add_sample(struct i915_pmu_sample *sample, u32 val)
+{
+ sample->cur += val;
+}
+
+static bool exclusive_mmio_access(const struct drm_i915_private *i915)
+{
+ /*
+ * We have to avoid concurrent mmio cache line access on gen7 or
+ * risk a machine hang. For a fun history lesson dig out the old
+ * userspace intel_gpu_top and run it on Ivybridge or Haswell!
+ */
+ return GRAPHICS_VER(i915) == 7;
+}
+
+static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ bool busy;
+ u32 val;
+
+ val = ENGINE_READ_FW(engine, RING_CTL);
+ if (val == 0) /* powerwell off => engine idle */
+ return;
+
+ if (val & RING_WAIT)
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ return;
+
+ /*
+ * While waiting on a semaphore or event, MI_MODE reports the
+ * ring as idle. However, previously using the seqno, and with
+ * execlists sampling, we account for the ring waiting as the
+ * engine being busy. Therefore, we record the sample as being
+ * busy if either waiting or !idle.
+ */
+ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+ if (!busy) {
+ val = ENGINE_READ_FW(engine, RING_MI_MODE);
+ busy = !(val & MODE_IDLE);
+ }
+ if (busy)
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ u32 tail, head, acthd;
+
+ tail = ENGINE_READ_FW(engine, RING_TAIL);
+ head = ENGINE_READ_FW(engine, RING_HEAD);
+ acthd = ENGINE_READ_FW(engine, ACTHD);
+
+ if (head & HEAD_WAIT_I8XX)
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+
+ if (head & HEAD_WAIT_I8XX || head != acthd ||
+ (head & HEAD_ADDR) != (tail & TAIL_ADDR))
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ if (GRAPHICS_VER(engine->i915) >= 3)
+ gen3_engine_sample(engine, period_ns);
+ else
+ gen2_engine_sample(engine, period_ns);
+}
+
+static void
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ return;
+
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ for_each_engine(engine, gt, id) {
+ if (!engine->pmu.enable)
+ continue;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ continue;
+
+ if (exclusive_mmio_access(i915)) {
+ spin_lock_irqsave(&engine->uncore->lock, flags);
+ engine_sample(engine, period_ns);
+ spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ } else {
+ engine_sample(engine, period_ns);
+ }
+
+ intel_engine_pm_put_async(engine);
+ }
+}
+
+static bool
+frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
+{
+ return pmu->enable &
+ (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
+}
+
+static void
+frequency_sample(struct intel_gt *gt, unsigned int period_ns)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
+ struct i915_pmu *pmu = &i915->pmu;
+ struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
+
+ if (!frequency_sampling_enabled(pmu, gt_id))
+ return;
+
+ /* Report 0/0 (actual/requested) frequency while parked. */
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!wakeref)
+ return;
+
+ if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
+ u32 val;
+
+ /*
+ * We take a quick peek here without using forcewake
+ * so that we don't perturb the system under observation
+ * (forcewake => !rc6 => increased power use). We expect
+ * that if the read fails because it is outside of the
+ * mmio power well, then it will return 0 -- in which
+ * case we assume the system is running at the intended
+ * frequency. Fortunately, the read should rarely fail!
+ */
+ val = intel_rps_read_actual_frequency_fw(rps);
+ if (!val)
+ val = intel_gpu_freq(rps, rps->cur_freq);
+
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
+ val, period_ns / 1000);
+ }
+
+ if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
+ intel_rps_get_requested_frequency(rps),
+ period_ns / 1000);
+ }
+
+ intel_gt_pm_put_async(gt, wakeref);
+}
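+
+/*
+ * Unit bookkeeping (illustrative): add_sample_mult() above accumulates the
+ * frequency in MHz multiplied by the sample period in microseconds
+ * (period_ns / 1000). The read side divides the accumulated value by
+ * USEC_PER_SEC, so the exposed counter advances by the average frequency in
+ * MHz for every second of unparked time, and a per-second delta as computed
+ * by perf reports the average frequency directly.
+ */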
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+ struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ unsigned int period_ns;
+ struct intel_gt *gt;
+ unsigned int i;
+ ktime_t now;
+
+ if (!READ_ONCE(pmu->timer_enabled))
+ return HRTIMER_NORESTART;
+
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
+ pmu->timer_last = now;
+
+ /*
+	 * Strictly speaking the passed-in period may not be 100% accurate for
+	 * all internal calculations, since some amount of time can be spent
+	 * grabbing the forcewake. However, the potential error from timer
+	 * callback delay greatly dominates this, so we keep it simple.
+ */
+
+ for_each_gt(gt, i915, i) {
+ if (!(pmu->unparked & BIT(i)))
+ continue;
+
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
+ }
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
+
+ return HRTIMER_RESTART;
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+
+ drm_WARN_ON(&i915->drm, event->parent);
+
+ drm_dev_put(&i915->drm);
+}
+
+static int
+engine_event_status(struct intel_engine_cs *engine,
+ enum drm_i915_pmu_engine_sample sample)
+{
+ switch (sample) {
+ case I915_SAMPLE_BUSY:
+ case I915_SAMPLE_WAIT:
+ break;
+ case I915_SAMPLE_SEMA:
+ if (GRAPHICS_VER(engine->i915) < 6)
+ return -ENODEV;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int
+config_status(struct drm_i915_private *i915, u64 config)
+{
+ struct intel_gt *gt = to_gt(i915);
+
+ unsigned int gt_id = config_gt_id(config);
+ unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;
+
+ if (gt_id > max_gt_id)
+ return -ENOENT;
+
+ switch (config_counter(config)) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ /* Requires a mutex for sampling! */
+ return -ENODEV;
+ fallthrough;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ if (GRAPHICS_VER(i915) < 6)
+ return -ENODEV;
+ break;
+ case I915_PMU_INTERRUPTS:
+ if (gt_id)
+ return -ENOENT;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ if (!gt->rc6.supported)
+ return -ENODEV;
+ break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int engine_event_init(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event));
+ if (!engine)
+ return -ENODEV;
+
+ return engine_event_status(engine, engine_event_sample(event));
+}
+
+static int i915_pmu_event_init(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ int ret;
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* only allow running on one cpu at a time */
+ if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
+ return -EINVAL;
+
+ if (is_engine_event(event))
+ ret = engine_event_init(event);
+ else
+ ret = config_status(i915, event->attr.config);
+ if (ret)
+ return ret;
+
+ if (!event->parent) {
+ drm_dev_get(&i915->drm);
+ event->destroy = i915_pmu_event_destroy;
+ }
+
+ return 0;
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ u64 val = 0;
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
+ /* Do nothing */
+ } else if (sample == I915_SAMPLE_BUSY &&
+ intel_engine_supports_stats(engine)) {
+ ktime_t unused;
+
+ val = ktime_to_ns(intel_engine_get_busy_time(engine,
+ &unused));
+ } else {
+ val = engine->pmu.sample[sample].cur;
+ }
+ } else {
+ const unsigned int gt_id = config_gt_id(event->attr.config);
+ const u64 config = config_counter(event->attr.config);
+
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val =
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_ACT),
+ USEC_PER_SEC /* to MHz */);
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val =
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_REQ),
+ USEC_PER_SEC /* to MHz */);
+ break;
+ case I915_PMU_INTERRUPTS:
+ val = READ_ONCE(pmu->irq_count);
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = get_rc6(i915->gt[gt_id]);
+ break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
+ break;
+ }
+ }
+
+ return val;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
+ if (!pmu->registered) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ prev = local64_read(&hwc->prev_count);
+ do {
+ new = __i915_pmu_event_read(event);
+ } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
+
+ local64_add(new - prev, &event->count);
+}
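+
+/*
+ * Note (illustrative): the local64_try_cmpxchg() loop above keeps concurrent
+ * readers consistent - whichever reader wins the update of prev_count adds
+ * exactly the delta it observed, so event->count never double-counts.
+ */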
+
+static void i915_pmu_enable(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ const unsigned int bit = event_bit(event);
+ unsigned long flags;
+
+ if (bit == -1)
+ goto update;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /*
+ * Update the bitmask of enabled events and increment
+ * the event reference counter.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+
+ pmu->enable |= BIT(bit);
+ pmu->enable_count[bit]++;
+
+ /*
+ * Start the sampling timer if needed and not already enabled.
+ */
+ __i915_pmu_maybe_start_timer(pmu);
+
+ /*
+	 * For per-engine events the bitmask and reference counting
+	 * are stored per engine.
+ */
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+ I915_ENGINE_SAMPLE_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+ I915_ENGINE_SAMPLE_COUNT);
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+ engine->pmu.enable |= BIT(sample);
+ engine->pmu.enable_count[sample]++;
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+update:
+ /*
+	 * Store the current counter value so we can report the correct delta
+	 * for all listeners, even when the event was already enabled and has
+	 * an existing non-zero value.
+ */
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+}
+
+static void i915_pmu_disable(struct perf_event *event)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ const unsigned int bit = event_bit(event);
+ unsigned long flags;
+
+ if (bit == -1)
+ return;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--engine->pmu.enable_count[sample] == 0)
+ engine->pmu.enable &= ~BIT(sample);
+ }
+
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--pmu->enable_count[bit] == 0) {
+ pmu->enable &= ~BIT(bit);
+ pmu->timer_enabled &= pmu_needs_timer(pmu);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static void i915_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+
+ if (!pmu->registered)
+ return;
+
+ i915_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void i915_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+
+ if (!pmu->registered)
+ goto out;
+
+ if (flags & PERF_EF_UPDATE)
+ i915_pmu_event_read(event);
+
+ i915_pmu_disable(event);
+
+out:
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int i915_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct i915_pmu *pmu = event_to_pmu(event);
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (flags & PERF_EF_START)
+ i915_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void i915_pmu_event_del(struct perf_event *event, int flags)
+{
+ i915_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int i915_pmu_event_event_idx(struct perf_event *event)
+{
+ return 0;
+}
+
+struct i915_str_attribute {
+ struct device_attribute attr;
+ const char *str;
+};
+
+static ssize_t i915_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_str_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_str_attribute, attr);
+ return sprintf(buf, "%s\n", eattr->str);
+}
+
+#define I915_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct i915_str_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
+ .str = _config, } \
+ })[0].attr.attr)
+
+static struct attribute *i915_pmu_format_attrs[] = {
+ I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = i915_pmu_format_attrs,
+};
+
+struct i915_ext_attribute {
+ struct device_attribute attr;
+ unsigned long val;
+};
+
+static ssize_t i915_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", eattr->val);
+}
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *i915_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_cpumask_attr_group = {
+ .attrs = i915_cpumask_attrs,
+};
+
+#define __event(__counter, __name, __unit) \
+{ \
+ .counter = (__counter), \
+ .name = (__name), \
+ .unit = (__unit), \
+ .global = false, \
+}
+
+#define __global_event(__counter, __name, __unit) \
+{ \
+ .counter = (__counter), \
+ .name = (__name), \
+ .unit = (__unit), \
+ .global = true, \
+}
+
+#define __engine_event(__sample, __name) \
+{ \
+ .sample = (__sample), \
+ .name = (__name), \
+}
+
+static struct i915_ext_attribute *
+add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
+{
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = i915_pmu_event_show;
+ attr->val = config;
+
+ return ++attr;
+}
+
+static struct perf_pmu_events_attr *
+add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
+ const char *str)
+{
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = perf_event_sysfs_show;
+ attr->event_str = str;
+
+ return ++attr;
+}
+
+static struct attribute **
+create_event_attributes(struct i915_pmu *pmu)
+{
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
+ static const struct {
+ unsigned int counter;
+ const char *name;
+ const char *unit;
+ bool global;
+ } events[] = {
+ __event(0, "actual-frequency", "M"),
+ __event(1, "requested-frequency", "M"),
+ __global_event(2, "interrupts", NULL),
+ __event(3, "rc6-residency", "ns"),
+ __event(4, "software-gt-awake-time", "ns"),
+ };
+ static const struct {
+ enum drm_i915_pmu_engine_sample sample;
+ char *name;
+ } engine_events[] = {
+ __engine_event(I915_SAMPLE_BUSY, "busy"),
+ __engine_event(I915_SAMPLE_SEMA, "sema"),
+ __engine_event(I915_SAMPLE_WAIT, "wait"),
+ };
+ unsigned int count = 0;
+ struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
+ struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
+ struct attribute **attr = NULL, **attr_iter;
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt;
+ unsigned int i, j;
+
+ /* Count how many counters we will be exposing. */
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+
+ if (!config_status(i915, config))
+ count++;
+ }
+ }
+
+ for_each_uabi_engine(engine, i915) {
+ for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+ if (!engine_event_status(engine,
+ engine_events[i].sample))
+ count++;
+ }
+ }
+
+ /* Allocate attribute objects and table. */
+ i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
+ if (!i915_attr)
+ goto err_alloc;
+
+ pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
+ if (!pmu_attr)
+ goto err_alloc;
+
+ /* Max one pointer of each attribute type plus a termination entry. */
+ attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ goto err_alloc;
+
+ i915_iter = i915_attr;
+ pmu_iter = pmu_attr;
+ attr_iter = attr;
+
+ /* Initialize supported non-engine counters. */
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+ char *str;
+
+ if (config_status(i915, config))
+ continue;
+
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kstrdup(events[i].name, GFP_KERNEL);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u",
+ events[i].name, j);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter = add_i915_attr(i915_iter, str, config);
+
+ if (events[i].unit) {
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kasprintf(GFP_KERNEL, "%s.unit",
+ events[i].name);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
+ events[i].name, j);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str,
+ events[i].unit);
+ }
+ }
+ }
+
+ /* Initialize supported engine counters. */
+ for_each_uabi_engine(engine, i915) {
+ for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+ char *str;
+
+ if (engine_event_status(engine,
+ engine_events[i].sample))
+ continue;
+
+ str = kasprintf(GFP_KERNEL, "%s-%s",
+ engine->name, engine_events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter =
+ add_i915_attr(i915_iter, str,
+ __I915_PMU_ENGINE(engine->uabi_class,
+ engine->uabi_instance,
+ engine_events[i].sample));
+
+ str = kasprintf(GFP_KERNEL, "%s-%s.unit",
+ engine->name, engine_events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
+ }
+ }
+
+ pmu->i915_attr = i915_attr;
+ pmu->pmu_attr = pmu_attr;
+
+ return attr;
+
+err:;
+ for (attr_iter = attr; *attr_iter; attr_iter++)
+ kfree((*attr_iter)->name);
+
+err_alloc:
+ kfree(attr);
+ kfree(i915_attr);
+ kfree(pmu_attr);
+
+ return NULL;
+}
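+
+/*
+ * Example (assumed usage, not part of this file): the names built above are
+ * exposed via the "events" attribute group, e.g. "actual-frequency" (or
+ * "actual-frequency-gt1" on multi-GT parts) and "rcs0-busy", each optionally
+ * paired with a ".unit" attribute. They can typically be counted system-wide
+ * with something like:
+ *
+ *   perf stat -e i915/rcs0-busy/ -a sleep 1
+ */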
+
+static void free_event_attributes(struct i915_pmu *pmu)
+{
+ struct attribute **attr_iter = pmu->events_attr_group.attrs;
+
+ for (; *attr_iter; attr_iter++)
+ kfree((*attr_iter)->name);
+
+ kfree(pmu->events_attr_group.attrs);
+ kfree(pmu->i915_attr);
+ kfree(pmu->pmu_attr);
+
+ pmu->events_attr_group.attrs = NULL;
+ pmu->i915_attr = NULL;
+ pmu->pmu_attr = NULL;
+}
+
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
+
+ /* Select the first online CPU as a designated reader. */
+ if (cpumask_empty(&i915_pmu_cpumask))
+ cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+
+ return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
+ unsigned int target = i915_pmu_target_cpu;
+
+ /*
+ * Unregistering an instance generates a CPU offline event which we must
+ * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
+ */
+ if (!pmu->registered)
+ return 0;
+
+ if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+ target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+
+ /* Migrate events if there is a valid target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &i915_pmu_cpumask);
+ i915_pmu_target_cpu = target;
+ }
+ }
+
+ if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
+ perf_pmu_migrate_context(&pmu->base, cpu, target);
+ pmu->cpuhp.cpu = target;
+ }
+
+ return 0;
+}
+
+static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
+
+int i915_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/x86/intel/i915:online",
+ i915_pmu_cpu_online,
+ i915_pmu_cpu_offline);
+ if (ret < 0)
+ pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
+ ret);
+ else
+ cpuhp_state = ret;
+
+ return 0;
+}
+
+void i915_pmu_exit(void)
+{
+ if (cpuhp_state != CPUHP_INVALID)
+ cpuhp_remove_multi_state(cpuhp_state);
+}
+
+static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
+{
+ if (cpuhp_state == CPUHP_INVALID)
+ return -EINVAL;
+
+ return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
+}
+
+static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
+{
+ cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
+}
+
+void i915_pmu_register(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+ const struct attribute_group *attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &pmu->events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+ };
+ int ret = -ENOMEM;
+
+<<<<<<<
+ if (GRAPHICS_VER(i915) <= 2) {
+ drm_info(&i915->drm, "PMU not supported for this GPU.");
+ return;
+ }
+=======
+ pmu->closed = true;
+>>>>>>>
+
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+ pmu->cpuhp.cpu = -1;
+ init_rc6(pmu);
+
+ if (IS_DGFX(i915)) {
+ pmu->name = kasprintf(GFP_KERNEL,
+ "i915_%s",
+ dev_name(i915->drm.dev));
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
+ pmu->name = "i915";
+ }
+ if (!pmu->name)
+ goto err;
+
+ pmu->events_attr_group.name = "events";
+ pmu->events_attr_group.attrs = create_event_attributes(pmu);
+ if (!pmu->events_attr_group.attrs)
+ goto err_name;
+
+ pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+ if (!pmu->base.attr_groups)
+ goto err_attr;
+
+ pmu->base.module = THIS_MODULE;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = i915_pmu_event_init;
+ pmu->base.add = i915_pmu_event_add;
+ pmu->base.del = i915_pmu_event_del;
+ pmu->base.start = i915_pmu_event_start;
+ pmu->base.stop = i915_pmu_event_stop;
+ pmu->base.read = i915_pmu_event_read;
+ pmu->base.event_idx = i915_pmu_event_event_idx;
+
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
+ if (ret)
+ goto err_groups;
+
+ ret = i915_pmu_register_cpuhp_state(pmu);
+ if (ret)
+ goto err_unreg;
+
+<<<<<<<
+ if (drmm_add_action(&i915->drm, free_pmu, pmu))
+ goto err_unreg;
+
+ pmu->registered = true;
+=======
+ pmu->closed = false;
+>>>>>>>
+
+ return;
+
+err_unreg:
+ perf_pmu_unregister(&pmu->base);
+err_groups:
+ kfree(pmu->base.attr_groups);
+err_attr:
+ free_event_attributes(pmu);
+err_name:
+ if (IS_DGFX(i915))
+ kfree(pmu->name);
+err:
+ drm_notice(&i915->drm, "Failed to register PMU!\n");
+}
+
+void i915_pmu_unregister(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ /*
+	 * "Disconnect" the PMU callbacks - since all are atomic, synchronize_rcu()
+	 * ensures all currently executing callbacks will have exited before we
+	 * proceed with unregistration.
+ */
+ pmu->registered = false;
+ synchronize_rcu();
+
+ hrtimer_cancel(&pmu->timer);
+
+ i915_pmu_unregister_cpuhp_state(pmu);
+
+ perf_pmu_unregister(&pmu->base);
+ kfree(pmu->base.attr_groups);
+ if (IS_DGFX(i915))
+ kfree(pmu->name);
+ free_event_attributes(pmu);
+}