Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 519
1 file changed, 225 insertions(+), 294 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1c31967194b..9adc7bb9e69c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -45,8 +45,8 @@
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>

-#include <drm/drmP.h>
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
@@ -54,6 +54,7 @@
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
 #include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>

 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -90,8 +91,8 @@

 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20181204"
-#define DRIVER_TIMESTAMP	1543944377
+#define DRIVER_DATE		"20190207"
+#define DRIVER_TIMESTAMP	1549572331

 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -130,6 +131,8 @@ bool i915_error_injected(void);
	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

+typedef depot_stack_handle_t intel_wakeref_t;
+
 enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
@@ -281,16 +284,14 @@ struct drm_i915_display_funcs {
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-	int (*compute_intermediate_wm)(struct drm_device *dev,
-				       struct intel_crtc *intel_crtc,
-				       struct intel_crtc_state *newstate);
+	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
-	int (*compute_global_watermarks)(struct drm_atomic_state *state);
+	int (*compute_global_watermarks)(struct intel_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +323,20 @@ struct drm_i915_display_funcs {
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

-	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
-	void (*load_luts)(struct drm_crtc_state *crtc_state);
+	/*
+	 * Program double buffered color management registers during
+	 * vblank evasion. The registers should then latch during the
+	 * next vblank start, alongside any other double buffered registers
+	 * involved with the same commit.
+	 */
+	void (*color_commit)(const struct intel_crtc_state *crtc_state);
+	/*
+	 * Load LUTs (and other single buffered color management
+	 * registers). Will (hopefully) be called during the vblank
+	 * following the latching of any double buffered registers
+	 * involved with the same commit.
+	 */
+	void (*load_luts)(const struct intel_crtc_state *crtc_state);
 };

 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
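The two new hooks above split color management programming into a double buffered phase and a single buffered phase. A minimal illustrative sketch of that sequencing follows; it is not part of the patch, and the two example functions and their call sites are invented for illustration only:

/*
 * Illustration of the two-phase sequence the comments above describe;
 * example_commit_color()/example_finish_color() are invented names,
 * not the actual i915 commit path.
 */
static void example_commit_color(struct drm_i915_private *dev_priv,
				 const struct intel_crtc_state *crtc_state)
{
	/* Phase 1: inside the vblank-evasion window, write the double
	 * buffered CSC/gamma registers so they latch atomically with
	 * the rest of the commit at the next vblank start. */
	if (dev_priv->display.color_commit)
		dev_priv->display.color_commit(crtc_state);
}

static void example_finish_color(struct drm_i915_private *dev_priv,
				 const struct intel_crtc_state *crtc_state)
{
	/* Phase 2: in the vblank after the latch, load the single
	 * buffered LUTs so they only ever apply to frames scanned out
	 * with the new configuration. */
	if (dev_priv->display.load_luts)
		dev_priv->display.load_luts(crtc_state);
}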
@@ -333,16 +346,17 @@ struct drm_i915_display_funcs {
 struct intel_csr {
	struct work_struct work;
	const char *fw_path;
-	uint32_t required_version;
-	uint32_t max_fw_size; /* bytes */
-	uint32_t *dmc_payload;
-	uint32_t dmc_fw_size; /* dwords */
-	uint32_t version;
-	uint32_t mmio_count;
+	u32 required_version;
+	u32 max_fw_size; /* bytes */
+	u32 *dmc_payload;
+	u32 dmc_fw_size; /* dwords */
+	u32 version;
+	u32 mmio_count;
	i915_reg_t mmioaddr[8];
-	uint32_t mmiodata[8];
-	uint32_t dc_state;
-	uint32_t allowed_dc_mask;
+	u32 mmiodata[8];
+	u32 dc_state;
+	u32 allowed_dc_mask;
+	intel_wakeref_t wakeref;
 };

 enum i915_cache_level {
@@ -398,7 +412,7 @@ struct intel_fbc {

	struct {
		unsigned int mode_flags;
-		uint32_t hsw_bdw_pixel_rate;
+		u32 hsw_bdw_pixel_rate;
	} crtc;

	struct {
@@ -417,7 +431,7 @@ struct intel_fbc {

		int y;

-		uint16_t pixel_blend_mode;
+		u16 pixel_blend_mode;
	} plane;

	struct {
@@ -509,6 +523,7 @@ struct i915_psr {
	ktime_t last_exit;
	bool sink_not_reliable;
	bool irq_aux_error;
+	u16 su_x_granularity;
 };

 enum intel_pch {
@@ -556,7 +571,7 @@ struct i915_suspend_saved_registers {
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
-	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+	u64 saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
 };
@@ -819,6 +834,8 @@ struct i915_power_domains {
	bool display_core_suspended;
	int power_well_count;

+	intel_wakeref_t wakeref;
+
	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
@@ -901,9 +918,9 @@ struct i915_gem_mm {
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
-	uint32_t bit_6_swizzle_x;
+	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
-	uint32_t bit_6_swizzle_y;
+	u32 bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
@@ -930,18 +947,20 @@ struct ddi_vbt_port_info {
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
-	uint8_t hdmi_level_shift;
+	u8 hdmi_level_shift;

-	uint8_t supports_dvi:1;
-	uint8_t supports_hdmi:1;
-	uint8_t supports_dp:1;
-	uint8_t supports_edp:1;
+	u8 supports_dvi:1;
+	u8 supports_hdmi:1;
+	u8 supports_dp:1;
+	u8 supports_edp:1;
+	u8 supports_typec_usb:1;
+	u8 supports_tbt:1;

-	uint8_t alternate_aux_channel;
-	uint8_t alternate_ddc_pin;
+	u8 alternate_aux_channel;
+	u8 alternate_ddc_pin;

-	uint8_t dp_boost_level;
-	uint8_t hdmi_boost_level;
+	u8 dp_boost_level;
+	u8 hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
 };

@@ -1032,41 +1051,41 @@ enum intel_ddb_partitioning {

 struct intel_wm_level {
	bool enable;
-	uint32_t pri_val;
-	uint32_t spr_val;
-	uint32_t cur_val;
-	uint32_t fbc_val;
+	u32 pri_val;
+	u32 spr_val;
+	u32 cur_val;
+	u32 fbc_val;
 };

 struct ilk_wm_values {
-	uint32_t wm_pipe[3];
-	uint32_t wm_lp[3];
-	uint32_t wm_lp_spr[3];
-	uint32_t wm_linetime[3];
+	u32 wm_pipe[3];
+	u32 wm_lp[3];
+	u32 wm_lp_spr[3];
+	u32 wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
 };

 struct g4x_pipe_wm {
-	uint16_t plane[I915_MAX_PLANES];
-	uint16_t fbc;
+	u16 plane[I915_MAX_PLANES];
+	u16 fbc;
 };

 struct g4x_sr_wm {
-	uint16_t plane;
-	uint16_t cursor;
-	uint16_t fbc;
+	u16 plane;
+	u16 cursor;
+	u16 fbc;
 };

 struct vlv_wm_ddl_values {
-	uint8_t plane[I915_MAX_PLANES];
+	u8 plane[I915_MAX_PLANES];
 };

 struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
-	uint8_t level;
+	u8 level;
	bool cxsr;
 };

@@ -1080,10 +1099,10 @@ struct g4x_wm_values {
 };

 struct skl_ddb_entry {
-	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
+	u16 start, end;	/* in number of blocks, 'end' is exclusive */
 };

-static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
 {
	return entry->end - entry->start;
 }
@@ -1107,8 +1126,9 @@ struct skl_ddb_values {
 };

 struct skl_wm_level {
-	uint16_t plane_res_b;
-	uint8_t plane_res_l;
+	u16 min_ddb_alloc;
+	u16 plane_res_b;
+	u8 plane_res_l;
	bool plane_en;
 };

@@ -1117,15 +1137,15 @@ struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
-	uint32_t width;
-	uint8_t cpp;
-	uint32_t plane_pixel_rate;
-	uint32_t y_min_scanlines;
-	uint32_t plane_bytes_per_line;
+	u32 width;
+	u8 cpp;
+	u32 plane_pixel_rate;
+	u32 y_min_scanlines;
+	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
-	uint32_t linetime_us;
-	uint32_t dbuf_block_size;
+	u32 linetime_us;
+	u32 dbuf_block_size;
 };

 /*
@@ -1155,6 +1175,25 @@ struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+	/*
+	 * To aid detection of wakeref leaks and general misuse, we
+	 * track all wakeref holders. With manual markup (i.e. returning
+	 * a cookie to each rpm_get caller which they then supply to their
+	 * paired rpm_put) we can remove corresponding pairs and keep
+	 * the array trimmed to active wakerefs.
+	 */
+	struct intel_runtime_pm_debug {
+		spinlock_t lock;
+
+		depot_stack_handle_t last_acquire;
+		depot_stack_handle_t last_release;
+
+		depot_stack_handle_t *owners;
+		unsigned long count;
+	} debug;
+#endif
 };

 enum intel_pipe_crc_source {
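The tracking scheme described in the comment above pairs each acquire with a cookie the caller must return on release. A simplified, hedged sketch of the acquire-side bookkeeping (the real code lives in intel_runtime_pm.c; __save_depot_stack() here stands in for whatever internal stack-capture helper is used):

/* Simplified sketch of cookie-paired wakeref tracking; not the patch's
 * actual implementation. __save_depot_stack() is assumed, not shown. */
static intel_wakeref_t track_wakeref_get(struct i915_runtime_pm *rpm)
{
	depot_stack_handle_t stack = __save_depot_stack();
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	/* Grow the owners array by one slot and record this holder;
	 * the paired rpm_put later removes the matching entry, so the
	 * array stays trimmed to active wakerefs. */
	rpm->debug.owners = krealloc(rpm->debug.owners,
				     (rpm->debug.count + 1) * sizeof(stack),
				     GFP_NOWAIT | __GFP_NOWARN);
	if (rpm->debug.owners)
		rpm->debug.owners[rpm->debug.count++] = stack;
	rpm->debug.last_acquire = stack;
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack; /* the cookie the caller hands back to rpm_put */
}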
@@ -1311,6 +1350,12 @@ struct i915_perf_stream {
	struct list_head link;

	/**
+	 * @wakeref: As we keep the device awake while the perf stream is
+	 * active, we track our runtime pm reference for later release.
+	 */
+	intel_wakeref_t wakeref;
+
+	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, representing the contents
	 * of a single sample as read() by userspace.
@@ -1430,7 +1475,8 @@ struct drm_i915_private {
	struct kmem_cache *dependencies;
	struct kmem_cache *priorities;

-	const struct intel_device_info info;
+	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
@@ -1482,14 +1528,14 @@ struct drm_i915_private {
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
-	uint32_t gpio_mmio_base;
+	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
-	uint32_t mipi_mmio_base;
+	u32 mipi_mmio_base;

-	uint32_t psr_mmio_base;
+	u32 psr_mmio_base;

-	uint32_t pps_mmio_base;
+	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

@@ -1744,17 +1790,17 @@ struct drm_i915_private {
	 * in 0.5us units for WM1+.
	 */
	/* primary */
-	uint16_t pri_latency[5];
+	u16 pri_latency[5];
	/* sprite */
-	uint16_t spr_latency[5];
+	u16 spr_latency[5];
	/* cursor */
-	uint16_t cur_latency[5];
+	u16 cur_latency[5];
	/*
	 * Raw watermark memory latency values
	 * for SKL for all 8 levels
	 * in 1us units.
	 */
-	uint16_t skl_latency[8];
+	u16 skl_latency[8];

	/* current hardware state */
	union {
@@ -1764,7 +1810,7 @@ struct drm_i915_private {
		struct g4x_wm_values g4x;
	};

-	uint8_t max_level;
+	u8 max_level;

	/*
	 * Should be held around atomic WM register writing; also
@@ -1942,12 +1988,18 @@ struct drm_i915_private {
		void (*resume)(struct drm_i915_private *);
		void (*cleanup_engine)(struct intel_engine_cs *engine);

-		struct list_head timelines;
+		struct i915_gt_timelines {
+			struct mutex mutex; /* protects list, tainted by GPU */
+			struct list_head active_list;
+
+			/* Pack multiple timelines' seqnos into the same page */
+			spinlock_t hwsp_lock;
+			struct list_head hwsp_free_list;
+		} timelines;

		struct list_head active_rings;
		struct list_head closed_vma;
		u32 active_requests;
-		u32 request_serial;

		/**
		 * Is the GPU currently considered idle, or busy executing
@@ -1956,7 +2008,7 @@ struct drm_i915_private {
		 * In order to reduce the effect on performance, there
		 * is a slight delay before we do so.
		 */
-		bool awake;
+		intel_wakeref_t awake;

		/**
		 * The number of times we have woken up.
@@ -2191,17 +2243,12 @@ static inline unsigned int i915_sg_segment_size(void)
	return size;
 }

-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
-	return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
 #define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

-#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

 #define REVID_FOREVER		0xff
 #define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
@@ -2212,8 +2259,12 @@ intel_info(const struct drm_i915_private *dev_priv)
	 GENMASK((e) - 1, (s) - 1))

 /* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
-	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+	 INTEL_INFO(dev_priv)->gen == (n))

 /*
  * Return true if revision is in range [since,until] inclusive.
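As context for the new single-generation IS_GEN() form: BUILD_BUG_ON_ZERO() evaluates to 0 in the expression, but becomes a compile-time error when its argument is nonzero, so the macro rejects generation numbers the compiler cannot prove constant. A hedged illustration of the idiom, not part of the patch (IS_GEN_EXAMPLE is an invented name mirroring the macro above):

/* Illustration of the BUILD_BUG_ON_ZERO() guard used by IS_GEN(). */
#include <linux/build_bug.h>

#define IS_GEN_EXAMPLE(i915, n) \
	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
	 INTEL_INFO(i915)->gen == (n))

static void example(struct drm_i915_private *i915)
{
	if (IS_GEN_EXAMPLE(i915, 9))	/* fine: 9 is a literal constant */
		;

	/* Passing a value the compiler cannot prove constant would make
	 * __builtin_constant_p() return 0, so BUILD_BUG_ON_ZERO(1) turns
	 * the expression into a build failure instead of a runtime check. */
}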
@@ -2223,7 +2274,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))

 #define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
 #define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2296,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
 #define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
 #define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
-				 (dev_priv)->info.gt == 1)
+				 INTEL_INFO(dev_priv)->gt == 1)
 #define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
 #define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
 #define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2308,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2319,13 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
+				 INTEL_INFO(dev_priv)->gt == 1)
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
				 INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,23 +2348,25 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
				 INTEL_DEVID(dev_priv) == 0x87C0)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 4)
+				 INTEL_INFO(dev_priv)->gt == 4)
 #define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
 #define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 2)
+				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (dev_priv)->info.gt == 3)
+				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
+#define IS_ICL_WITH_PORT_F(dev_priv)   (IS_ICELAKE(dev_priv) && \
+					INTEL_DEVID(dev_priv) != 0x8A51)

 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)

@@ -2366,26 +2421,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_ICL_REVID(p, since, until) \
	(IS_ICELAKE(p) && IS_REVID(p, since, until))

-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(10)))
-
 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))

 #define ENGINE_MASK(id)	BIT(id)
 #define RENDER_RING	ENGINE_MASK(RCS)
@@ -2399,29 +2437,27 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define ALL_ENGINES	(~0)

 #define HAS_ENGINE(dev_priv, id) \
-	(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
+	(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))

 #define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
 #define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
 #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
 #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)

-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
-
-#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
-#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
+#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))

-#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)

 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
-		((dev_priv)->info.has_logical_ring_contexts)
+		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
-		((dev_priv)->info.has_logical_ring_elsq)
+		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
-		((dev_priv)->info.has_logical_ring_preemption)
+		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)

 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

@@ -2435,12 +2471,12 @@ intel_info(const struct drm_i915_private *dev_priv)

 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
-	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
 })

-#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.display.has_overlay)
+#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-		((dev_priv)->info.display.overlay_needs_physical)
+		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
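HAS_PAGE_SIZES() above is a bitmask subset test: the request passes only if no bit in sizes falls outside the device's supported mask. A small, freestanding illustration of the same idiom, not taken from the driver (the function name and values are invented):

/* Subset test: every bit set in 'requested' must also be set in
 * 'supported', i.e. requested & ~supported leaves nothing over. */
#include <stdbool.h>
#include <stdint.h>

static bool supports_page_sizes(uint64_t supported, uint64_t requested)
{
	return requested != 0 && (requested & ~supported) == 0;
}

/* e.g. with supported = 4K | 64K | 2M:
 *   supports_page_sizes(supported, SZ_64K)         -> true
 *   supports_page_sizes(supported, SZ_64K | SZ_1G) -> false (1G missing)
 */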
@@ -2458,42 +2494,42 @@ intel_info(const struct drm_i915_private *dev_priv)
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
					 !(IS_I915G(dev_priv) || \
					   IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.display.has_hotplug)
+#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

 #define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)	((dev_priv)->info.display.has_fbc)
-#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
+#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
+#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)

 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

-#define HAS_DP_MST(dev_priv)	((dev_priv)->info.display.has_dp_mst)
+#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)

-#define HAS_DDI(dev_priv)		 ((dev_priv)->info.display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)		 ((dev_priv)->info.display.has_psr)
+#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)

-#define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
-#define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
+#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
+#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
 #define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

-#define HAS_CSR(dev_priv)	((dev_priv)->info.display.has_csr)
+#define HAS_CSR(dev_priv)	(INTEL_INFO(dev_priv)->display.has_csr)

-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

-#define HAS_IPC(dev_priv)		 ((dev_priv)->info.display.has_ipc)
+#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
-#define HAS_GUC_CT(dev_priv)	((dev_priv)->info.has_guc_ct)
+#define HAS_GUC(dev_priv)	(INTEL_INFO(dev_priv)->has_guc)
+#define HAS_GUC_CT(dev_priv)	(INTEL_INFO(dev_priv)->has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))
@@ -2502,11 +2538,11 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))

 /* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
-#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
-#define USES_HUC(dev_priv)		intel_uc_is_using_huc()
+#define USES_GUC(dev_priv)		intel_uc_is_using_guc(dev_priv)
+#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission(dev_priv)
+#define USES_HUC(dev_priv)		intel_uc_is_using_huc(dev_priv)

-#define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

 #define INTEL_PCH_DEVICE_ID_MASK		0xff80
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2546,12 +2582,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)

-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
+#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))
@@ -2601,19 +2637,7 @@ extern const struct dev_pm_ops i915_pm_ops;

 extern int i915_driver_load(struct pci_dev *pdev,
			    const struct pci_device_id *ent);
 extern void i915_driver_unload(struct drm_device *dev);
-extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-
-extern void i915_reset(struct drm_i915_private *i915,
-		       unsigned int stalled_mask,
-		       const char *reason);
-extern int i915_reset_engine(struct intel_engine_cs *engine,
-			     const char *reason);
-
-extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
-extern int intel_reset_guc(struct drm_i915_private *dev_priv);
-extern int intel_guc_reset_engine(struct intel_guc *guc,
-				  struct intel_engine_cs *engine);
+
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2656,20 +2680,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
			      &dev_priv->gpu_error.hangcheck_work, delay);
 }

-__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *dev_priv,
-		       u32 engine_mask,
-		       unsigned long flags,
-		       const char *fmt, ...);
-#define I915_ERROR_CAPTURE BIT(0)
-
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
 extern void intel_irq_fini(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);

-void i915_clear_error_registers(struct drm_i915_private *dev_priv);
-
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
	return dev_priv->gvt;
@@ -2693,45 +2708,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);

 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
-				   uint32_t mask,
-				   uint32_t bits);
+				   u32 mask,
+				   u32 bits);
 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
-			    uint32_t interrupt_mask,
-			    uint32_t enabled_irq_mask);
+			    u32 interrupt_mask,
+			    u32 enabled_irq_mask);
 static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
 {
	ilk_update_display_irq(dev_priv, bits, bits);
 }
 static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
 {
	ilk_update_display_irq(dev_priv, bits, 0);
 }
 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
-			 uint32_t interrupt_mask,
-			 uint32_t enabled_irq_mask);
+			 u32 interrupt_mask,
+			 u32 enabled_irq_mask);
 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
-				       enum pipe pipe, uint32_t bits)
+				       enum pipe pipe, u32 bits)
 {
	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
 }
 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
-					enum pipe pipe, uint32_t bits)
+					enum pipe pipe, u32 bits)
 {
	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
 }
 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
-				  uint32_t interrupt_mask,
-				  uint32_t enabled_irq_mask);
+				  u32 interrupt_mask,
+				  u32 enabled_irq_mask);
 static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
 {
	ibx_display_interrupt_update(dev_priv, bits, bits);
 }
 static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
 {
	ibx_display_interrupt_update(dev_priv, bits, 0);
 }
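All of these helpers share one masked-update idiom: update(dev_priv, interrupt_mask, enabled_irq_mask) touches only the bits selected by interrupt_mask, so enabling passes (bits, bits) and disabling passes (bits, 0). A hedged sketch of the underlying register operation, mirroring what ilk_update_display_irq() does (IMR_REG and the function name are illustrative, not i915 symbols; I915_READ/I915_WRITE/POSTING_READ are the driver's MMIO accessors of this era):

/* Sketch of the masked interrupt-mask-register update idiom. */
static void example_update_irq(struct drm_i915_private *dev_priv,
			       u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 val = I915_READ(IMR_REG);

	/* Clear only the bits we were asked to touch... */
	val &= ~interrupt_mask;
	/* ...then, within that set, mask (set to 1) the IRQs that are
	 * NOT enabled; in an IMR a set bit means "masked off". */
	val |= (~enabled_irq_mask & interrupt_mask);

	I915_WRITE(IMR_REG, val);
	POSTING_READ(IMR_REG);	/* flush the write before returning */
}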
@@ -2916,13 +2931,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
	__i915_gem_object_unpin_pages(obj);
 }

-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER
+	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
 };

-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				enum i915_mm_subclass subclass);
 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);

 enum i915_map_type {
@@ -2991,7 +3006,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-		      uint32_t handle, uint64_t *offset);
+		      u32 handle, u64 *offset);
 int i915_gem_mmap_gtt_version(void);

 void i915_gem_track_fb(struct drm_i915_gem_object *old,
@@ -3008,11 +3023,6 @@ static inline bool i915_reset_backoff(struct i915_gpu_error *error)
	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
 }

-static inline bool i915_reset_handoff(struct i915_gpu_error *error)
-{
-	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
-}
-
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
	return unlikely(test_bit(I915_WEDGED, &error->flags));
@@ -3034,18 +3044,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
	return READ_ONCE(error->reset_engine_count[engine->id]);
 }

-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv,
-		    unsigned int stalled_mask);
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
-			   struct i915_request *request,
-			   bool stalled);

 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3142,7 +3142,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx,
-			    uint32_t *reg_state);
+			    u32 *reg_state);

 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3204,7 +3204,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+				    struct mutex *mutex);

 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3313,7 +3314,21 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 static inline struct intel_device_info *
 mkwrite_device_info(struct drm_i915_private *dev_priv)
 {
-	return (struct intel_device_info *)&dev_priv->info;
+	return (struct intel_device_info *)INTEL_INFO(dev_priv);
+}
+
+static inline struct intel_sseu
+intel_device_default_sseu(struct drm_i915_private *i915)
+{
+	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+	struct intel_sseu value = {
+		.slice_mask = sseu->slice_mask,
+		.subslice_mask = sseu->subslice_mask[0],
+		.min_eus_per_subslice = sseu->max_eus_per_subslice,
+		.max_eus_per_subslice = sseu->max_eus_per_subslice,
+	};
+
+	return value;
 }

 /* modesetting */
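The new intel_device_default_sseu() helper above snapshots the device's full slice/subslice topology from RUNTIME_INFO() as a default configuration, with the EU bounds pinned to the per-subslice maximum. A hedged usage sketch, not part of the patch (the example function is invented; DRM_DEBUG_DRIVER is the era's debug-logging macro):

/* Invented consumer showing how the default sseu snapshot reads. */
static void example_show_default_sseu(struct drm_i915_private *i915)
{
	struct intel_sseu def = intel_device_default_sseu(i915);

	/* Everything the hardware reports is enabled by default; user
	 * requests can later shrink this configuration. */
	DRM_DEBUG_DRIVER("default sseu: slices=%#x subslices=%#x eus=%u-%u\n",
			 def.slice_mask, def.subslice_mask,
			 def.min_eus_per_subslice, def.max_eus_per_subslice);
}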
@@ -3393,10 +3408,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
-				     uint8_t lane_lat_optim_mask);
-uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+				     u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

 void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
@@ -3599,90 +3614,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
	}
 }

-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
-	struct intel_engine_cs *engine = rq->engine;
-	u32 seqno;
-
-	/* Note that the engine may have wrapped around the seqno, and
-	 * so our request->global_seqno will be ahead of the hardware,
-	 * even though it completed the request before wrapping. We catch
-	 * this by kicking all the waiters before resetting the seqno
-	 * in hardware, and also signal the fence.
-	 */
-	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
-		return true;
-
-	/* The request was dequeued before we were awoken. We check after
-	 * inspecting the hw to confirm that this was the same request
-	 * that generated the HWS update. The memory barriers within
-	 * the request execution are sufficient to ensure that a check
-	 * after reading the value from hw matches this request.
-	 */
-	seqno = i915_request_global_seqno(rq);
-	if (!seqno)
-		return false;
-
-	/* Before we do the heavier coherent read of the seqno,
-	 * check the value (hopefully) in the CPU cacheline.
-	 */
-	if (__i915_request_completed(rq, seqno))
-		return true;
-
-	/* Ensure our read of the seqno is coherent so that we
-	 * do not "miss an interrupt" (i.e. if this is the last
-	 * request and the seqno write from the GPU is not visible
-	 * by the time the interrupt fires, we will see that the
-	 * request is incomplete and go back to sleep awaiting
-	 * another interrupt that will never come.)
-	 *
-	 * Strictly, we only need to do this once after an interrupt,
-	 * but it is easier and safer to do it every time the waiter
-	 * is woken.
-	 */
-	if (engine->irq_seqno_barrier &&
-	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
-		struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-		/* The ordering of irq_posted versus applying the barrier
-		 * is crucial. The clearing of the current irq_posted must
-		 * be visible before we perform the barrier operation,
-		 * such that if a subsequent interrupt arrives, irq_posted
-		 * is reasserted and our task rewoken (which causes us to
-		 * do another __i915_request_irq_complete() immediately
-		 * and reapply the barrier). Conversely, if the clear
-		 * occurs after the barrier, then an interrupt that arrived
-		 * whilst we waited on the barrier would not trigger a
-		 * barrier on the next pass, and the read may not see the
-		 * seqno update.
-		 */
-		engine->irq_seqno_barrier(engine);
-
-		/* If we consume the irq, but we are no longer the bottom-half,
-		 * the real bottom-half may not have serialised their own
-		 * seqno check with the irq-barrier (i.e. may have inspected
-		 * the seqno before we believe it coherent since they see
-		 * irq_posted == false but we are still running).
-		 */
-		spin_lock_irq(&b->irq_lock);
-		if (b->irq_wait && b->irq_wait->tsk != current)
-			/* Note that if the bottom-half is changed as we
-			 * are sending the wake-up, the new bottom-half will
-			 * be woken by whomever made the change. We only have
-			 * to worry about when we steal the irq-posted for
-			 * ourself.
-			 */
-			wake_up_process(b->irq_wait->tsk);
-		spin_unlock_irq(&b->irq_lock);
-
-		if (__i915_request_completed(rq, seqno))
-			return true;
-	}
-
-	return false;
-}
-
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);