author    | Dave Airlie <airlied@redhat.com> | 2014-11-25 22:10:53 +1000
committer | Dave Airlie <airlied@redhat.com> | 2014-11-25 22:10:53 +1000
commit    | 955289c7cfad158dc939e150896a240f549ccc60 (patch)
tree      | 3c3afeec99f63a52904f55906eb987bf89710612
parent    | ed1e8777a56f3523712506d608a29f57ed37b613 (diff)
parent    | 46df9adb2e7709e56ab8aacaff2fc997a6d17239 (diff)
Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next
Now that we have the bits needed for mdp5 atomic, here is the followup
pull request I mentioned. Main highlights are:
1) mdp5 multiple crtc and public plane support (no more hard-coded mixer setup!)
2) mdp5 atomic conversion
3) a couple of atomic helper fixes for issues found during mdp5 atomic
debug (reviewed by danvet.. but he didn't plan to send an
atomic-fixes pull request so I agreed to tack them on to mine)
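For highlight 3, the gist of the "check mode_changed *after* atomic_check" fix (visible in the drm_atomic_helper.c hunks further down) is an ordering constraint: a plane's ->atomic_check() may flag crtc_state->mode_changed, so the modeset bookkeeping has to run after the per-plane checks or it misses the flag. Below is a minimal stand-alone C toy of just that ordering, with made-up names (plane_atomic_check, check_modeset) rather than the real DRM helper API:

/* toy model: plane checks must run before the modeset check */
#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	bool mode_changed;
};

struct plane {
	struct crtc_state *crtc;
	bool needs_full_modeset;
};

/* driver ->atomic_check() hook: may flag a full modeset on its crtc */
static int plane_atomic_check(struct plane *p)
{
	if (p->needs_full_modeset)
		p->crtc->mode_changed = true;
	return 0;
}

/* modeset bookkeeping: picks the path based on mode_changed */
static void check_modeset(struct crtc_state *c)
{
	printf("mode_changed=%d -> %s\n", c->mode_changed,
	       c->mode_changed ? "full modeset" : "fast path");
}

int main(void)
{
	struct crtc_state cs = { .mode_changed = false };
	struct plane p = { .crtc = &cs, .needs_full_modeset = true };

	plane_atomic_check(&p);	/* runs first... */
	check_modeset(&cs);	/* ...so the flag is visible here */
	return 0;
}

Swap the two calls in main() and the fast path is taken even though a full modeset is needed, which is essentially the bug the reordering in drm_atomic_helper_check() fixes.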
* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
drm/atomic: shutdown *current* encoder
drm/atomic: check mode_changed *after* atomic_check
drm/msm/mdp4: fix mixer setup for multi-crtc + planes
drm/msm/mdp5: dpms(OFF) cleanups
drm/msm/mdp5: atomic
drm/msm: atomic fixes
drm/msm/mdp5: remove global mdp5_ctl_mgr
drm/msm/mdp5: don't use void * for opaque types
drm/msm: add multiple CRTC and overlay support
drm/msm/mdp5: set rate before enabling clk
drm/msm/mdp5: introduce mdp5_cfg module
drm/msm/mdp5: make SMP module dynamically configurable
drm/msm/hdmi: remove useless kref
drm/msm/mdp5: get the core clock rate from MDP5 config
drm/msm/mdp5: use irqdomains
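The last item above replaces the hard-coded hdmi_irq() call in mdp5_irq() with a generic demux loop that peels the highest set status bit with fls() and hands it to the irq domain (see the mdp5_irq.c hunks below). Here is a user-space sketch of that loop shape only; handle_hwirq() stands in for generic_handle_irq(irq_find_mapping(...)), and fls32() is a local helper since glibc has no fls():

#include <stdio.h>

/* stand-in for the kernel's fls(): 1-based index of the highest set bit */
static int fls32(unsigned int v)
{
	return v ? 32 - __builtin_clz(v) : 0;
}

/* stands in for generic_handle_irq(irq_find_mapping(domain, hwirq)) */
static void handle_hwirq(int hwirq)
{
	printf("dispatch hwirq %d\n", hwirq);
}

int main(void)
{
	/* fake HW_INTR_STATUS with three sub-block bits pending */
	unsigned int intr = (1u << 8) | (1u << 2) | (1u << 0);

	while (intr) {
		int hwirq = fls32(intr) - 1;
		handle_hwirq(hwirq);
		intr &= ~(1u << hwirq);
	}
	return 0;
}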
24 files changed, 1737 insertions, 739 deletions
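One structural piece of the patch worth calling out before the diff body: the new mdp5_ctl.c introduces a CTL pool manager, in which CTLs are no longer tied 1:1 to CRTC ids but allocated first-free from a fixed array under a pool lock (mdp5_ctlm_request()/mdp5_ctl_release()). The following is a self-contained user-space sketch of that allocation pattern, with pthread spinlocks standing in for spin_lock_irqsave() and illustrative names (ctl_request, ctl_release), not the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CTL 8

struct ctl {
	int id;
	bool busy;	/* whether this CTL has been handed out */
};

struct ctl_manager {
	pthread_spinlock_t pool_lock;	/* protects ctls[i].busy */
	int nctl;
	struct ctl ctls[MAX_CTL];
};

/* first-free allocation, as in mdp5_ctlm_request() */
static struct ctl *ctl_request(struct ctl_manager *m)
{
	struct ctl *ctl = NULL;
	int c;

	pthread_spin_lock(&m->pool_lock);
	for (c = 0; c < m->nctl; c++) {
		if (!m->ctls[c].busy) {
			ctl = &m->ctls[c];
			ctl->busy = true;
			break;
		}
	}
	pthread_spin_unlock(&m->pool_lock);
	return ctl;	/* NULL when the pool is exhausted */
}

static void ctl_release(struct ctl_manager *m, struct ctl *ctl)
{
	pthread_spin_lock(&m->pool_lock);
	ctl->busy = false;
	pthread_spin_unlock(&m->pool_lock);
}

int main(void)
{
	struct ctl_manager m = { .nctl = 5 };
	int c;

	pthread_spin_init(&m.pool_lock, PTHREAD_PROCESS_PRIVATE);
	for (c = 0; c < m.nctl; c++)
		m.ctls[c].id = c;

	struct ctl *a = ctl_request(&m);
	struct ctl *b = ctl_request(&m);
	printf("got CTL %d and CTL %d\n", a->id, b->id);
	ctl_release(&m, a);
	ctl_release(&m, b);
	return 0;
}

ctl_request() returning NULL on exhaustion mirrors the "No more CTL available!" error path in the real allocator.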
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 690360038dc1..a17b8e9c0a81 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -331,7 +331,7 @@ mode_fixup(struct drm_atomic_state *state) } static int -drm_atomic_helper_check_prepare(struct drm_device *dev, +drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state) { int ncrtcs = dev->mode_config.num_crtc; @@ -428,10 +428,6 @@ int drm_atomic_helper_check(struct drm_device *dev, int ncrtcs = dev->mode_config.num_crtc; int i, ret = 0; - ret = drm_atomic_helper_check_prepare(dev, state); - if (ret) - return ret; - for (i = 0; i < nplanes; i++) { struct drm_plane_helper_funcs *funcs; struct drm_plane *plane = state->planes[i]; @@ -475,6 +471,10 @@ int drm_atomic_helper_check(struct drm_device *dev, } } + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) + return ret; + return ret; } EXPORT_SYMBOL(drm_atomic_helper_check); @@ -499,9 +499,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (!old_conn_state || !old_conn_state->crtc) continue; - encoder = connector->state->best_encoder; + encoder = old_conn_state->best_encoder; - if (!encoder) + /* We shouldn't get this far if we didn't previously have + * an encoder.. but WARN_ON() rather than explode. + */ + if (WARN_ON(!encoder)) continue; funcs = encoder->helper_private; diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 0d96132df059..143d988f8add 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -25,6 +25,8 @@ msm-y := \ mdp/mdp4/mdp4_irq.o \ mdp/mdp4/mdp4_kms.o \ mdp/mdp4/mdp4_plane.o \ + mdp/mdp5/mdp5_cfg.o \ + mdp/mdp5/mdp5_ctl.o \ mdp/mdp5/mdp5_crtc.o \ mdp/mdp5/mdp5_encoder.o \ mdp/mdp5/mdp5_irq.o \ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 90077619029d..062c68725376 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -15,6 +15,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/of_irq.h> #include "hdmi.h" void hdmi_set_mode(struct hdmi *hdmi, bool power_on) @@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) power_on ? 
"Enable" : "Disable", ctrl); } -irqreturn_t hdmi_irq(int irq, void *dev_id) +static irqreturn_t hdmi_irq(int irq, void *dev_id) { struct hdmi *hdmi = dev_id; @@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id) return IRQ_HANDLED; } -void hdmi_destroy(struct kref *kref) +static void hdmi_destroy(struct hdmi *hdmi) { - struct hdmi *hdmi = container_of(kref, struct hdmi, refcount); struct hdmi_phy *phy = hdmi->phy; if (phy) @@ -84,8 +84,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } - kref_init(&hdmi->refcount); - hdmi->pdev = pdev; hdmi->config = config; @@ -182,7 +180,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) fail: if (hdmi) - hdmi_destroy(&hdmi->refcount); + hdmi_destroy(hdmi); return ERR_PTR(ret); } @@ -200,7 +198,6 @@ int hdmi_modeset_init(struct hdmi *hdmi, { struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = hdmi->pdev; - struct hdmi_platform_config *config = pdev->dev.platform_data; int ret; hdmi->dev = dev; @@ -224,22 +221,20 @@ int hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - if (!config->shared_irq) { - hdmi->irq = platform_get_irq(pdev, 0); - if (hdmi->irq < 0) { - ret = hdmi->irq; - dev_err(dev->dev, "failed to get irq: %d\n", ret); - goto fail; - } + hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (hdmi->irq < 0) { + ret = hdmi->irq; + dev_err(dev->dev, "failed to get irq: %d\n", ret); + goto fail; + } - ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, - NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - "hdmi_isr", hdmi); - if (ret < 0) { - dev_err(dev->dev, "failed to request IRQ%u: %d\n", - hdmi->irq, ret); - goto fail; - } + ret = devm_request_irq(&pdev->dev, hdmi->irq, + hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "hdmi_isr", hdmi); + if (ret < 0) { + dev_err(dev->dev, "failed to request IRQ%u: %d\n", + hdmi->irq, ret); + goto fail; } encoder->bridge = hdmi->bridge; @@ -271,12 +266,6 @@ fail: #include <linux/of_gpio.h> -static void set_hdmi(struct drm_device *dev, struct hdmi *hdmi) -{ - struct msm_drm_private *priv = dev->dev_private; - priv->hdmi = hdmi; -} - #ifdef CONFIG_OF static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) { @@ -297,6 +286,8 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char static int hdmi_bind(struct device *dev, struct device *master, void *data) { + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; static struct hdmi_platform_config config = {}; struct hdmi *hdmi; #ifdef CONFIG_OF @@ -318,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); config.pwr_clk_names = pwr_clk_names; config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); - config.shared_irq = true; } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) { static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"}; @@ -392,14 +382,19 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) hdmi = hdmi_init(to_platform_device(dev)); if (IS_ERR(hdmi)) return PTR_ERR(hdmi); - set_hdmi(dev_get_drvdata(master), hdmi); + priv->hdmi = hdmi; return 0; } static void hdmi_unbind(struct device *dev, struct device *master, void *data) { - set_hdmi(dev_get_drvdata(master), NULL); + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = 
drm->dev_private; + if (priv->hdmi) { + hdmi_destroy(priv->hdmi); + priv->hdmi = NULL; + } } static const struct component_ops hdmi_ops = { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index b981995410b5..43e654f751b7 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -38,8 +38,6 @@ struct hdmi_audio { }; struct hdmi { - struct kref refcount; - struct drm_device *dev; struct platform_device *pdev; @@ -97,13 +95,9 @@ struct hdmi_platform_config { /* gpio's: */ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; int mux_lpm_gpio; - - /* older devices had their own irq, mdp5+ it is shared w/ mdp: */ - bool shared_irq; }; void hdmi_set_mode(struct hdmi *hdmi, bool power_on); -void hdmi_destroy(struct kref *kref); static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) { @@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg) return msm_readl(hdmi->mmio + reg); } -static inline struct hdmi * hdmi_reference(struct hdmi *hdmi) -{ - kref_get(&hdmi->refcount); - return hdmi; -} - -static inline void hdmi_unreference(struct hdmi *hdmi) -{ - kref_put(&hdmi->refcount, hdmi_destroy); -} - /* * The phy appears to be different, for example between 8960 and 8x60, * so split the phy related functions out and load the correct one at diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index f6cf745c249e..6902ad6da710 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -26,7 +26,6 @@ struct hdmi_bridge { static void hdmi_bridge_destroy(struct drm_bridge *bridge) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); - hdmi_unreference(hdmi_bridge->hdmi); drm_bridge_cleanup(bridge); kfree(hdmi_bridge); } @@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) goto fail; } - hdmi_bridge->hdmi = hdmi_reference(hdmi); + hdmi_bridge->hdmi = hdmi; bridge = &hdmi_bridge->base; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 0aecb2580072..fbebb0405d76 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector) drm_connector_unregister(connector); drm_connector_cleanup(connector); - hdmi_unreference(hdmi_connector->hdmi); - kfree(hdmi_connector); } @@ -425,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) goto fail; } - hdmi_connector->hdmi = hdmi_reference(hdmi); + hdmi_connector->hdmi = hdmi; INIT_WORK(&hdmi_connector->hpd_work, hotplug_work); connector = &hdmi_connector->base; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index fef22e8cabb6..6781aa994613 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -167,34 +167,54 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc, return true; } -static void blend_setup(struct drm_crtc *crtc) +/* statically (for now) map planes to mixer stage (z-order): */ +static const int idxs[] = { + [VG1] = 1, + [VG2] = 2, + [RGB1] = 0, + [RGB2] = 0, + [RGB3] = 0, + [VG3] = 3, + [VG4] = 4, + +}; + +/* setup mixer config, for which we need to consider all crtc's and + * the planes attached to them + * + * TODO may possibly need some extra locking here + */ +static void setup_mixer(struct mdp4_kms *mdp4_kms) { - struct mdp4_crtc *mdp4_crtc = 
to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - struct drm_plane *plane; - int i, ovlp = mdp4_crtc->ovlp; + struct drm_mode_config *config = &mdp4_kms->dev->mode_config; + struct drm_crtc *crtc; uint32_t mixer_cfg = 0; static const enum mdp_mixer_stage_id stages[] = { STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, }; - /* statically (for now) map planes to mixer stage (z-order): */ - static const int idxs[] = { - [VG1] = 1, - [VG2] = 2, - [RGB1] = 0, - [RGB2] = 0, - [RGB3] = 0, - [VG3] = 3, - [VG4] = 4, - }; - bool alpha[4]= { false, false, false, false }; + list_for_each_entry(crtc, &config->crtc_list, head) { + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_plane *plane; - /* Don't rely on value read back from hw, but instead use our - * own shadowed value. Possibly disable/reenable looses the - * previous value and goes back to power-on default? - */ - mixer_cfg = mdp4_kms->mixer_cfg; + for_each_plane_on_crtc(crtc, plane) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + int idx = idxs[pipe_id]; + mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer, + pipe_id, stages[idx]); + } + } + + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); +} + +static void blend_setup(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_plane *plane; + int i, ovlp = mdp4_crtc->ovlp; + bool alpha[4]= { false, false, false, false }; mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); @@ -209,13 +229,8 @@ static void blend_setup(struct drm_crtc *crtc) to_mdp_format(msm_framebuffer_format(plane->fb)); alpha[idx-1] = format->alpha_enable; } - mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer, - pipe_id, stages[idx]); } - /* this shouldn't happen.. and seems to cause underflow: */ - WARN_ON(!mixer_cfg); - for (i = 0; i < 4; i++) { uint32_t op; @@ -238,8 +253,7 @@ static void blend_setup(struct drm_crtc *crtc) mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); } - mdp4_kms->mixer_cfg = mixer_cfg; - mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); + setup_mixer(mdp4_kms); } static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 770645296f11..cbd77bc626d5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -32,13 +32,6 @@ struct mdp4_kms { int rev; - /* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all - * crtcs/encoders is in one shared register, we need to update it - * via read/modify/write. But to avoid getting confused by power- - * on-default values after resume, use this shadow value instead: - */ - uint32_t mixer_cfg; - /* mapper-id used to request GEM buffer mapped for scanout: */ int id; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c new file mode 100644 index 000000000000..b0a44310cf2a --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mdp5_kms.h" +#include "mdp5_cfg.h" + +struct mdp5_cfg_handler { + int revision; + struct mdp5_cfg config; +}; + +/* mdp5_cfg must be exposed (used in mdp5.xml.h) */ +const struct mdp5_cfg_hw *mdp5_cfg = NULL; + +const struct mdp5_cfg_hw msm8x74_config = { + .name = "msm8x74", + .smp = { + .mmb_count = 22, + .mmb_size = 4096, + }, + .ctl = { + .count = 5, + .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01200, 0x01600, 0x01a00 }, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01e00, 0x02200, 0x02600 }, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02a00, 0x02e00 }, + }, + .lm = { + .count = 5, + .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, + .nb_stages = 5, + }, + .dspp = { + .count = 3, + .base = { 0x04600, 0x04a00, 0x04e00 }, + }, + .ad = { + .count = 2, + .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ + }, + .intf = { + .count = 4, + .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, + }, + .max_clk = 200000000, +}; + +const struct mdp5_cfg_hw apq8084_config = { + .name = "apq8084", + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */ + .reserved[CID_RGB0] = 2, + .reserved[CID_RGB1] = 2, + .reserved[CID_RGB2] = 2, + .reserved[CID_RGB3] = 2, + }, + .ctl = { + .count = 5, + .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + }, + .pipe_vig = { + .count = 4, + .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, + }, + .pipe_dma = { + .count = 2, + .base = { 0x03200, 0x03600 }, + }, + .lm = { + .count = 6, + .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, + .nb_stages = 5, + }, + .dspp = { + .count = 4, + .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 }, + + }, + .ad = { + .count = 3, + .base = { 0x13500, 0x13700, 0x13900 }, + }, + .intf = { + .count = 5, + .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, + }, + .max_clk = 320000000, +}; + +static const struct mdp5_cfg_handler cfg_handlers[] = { + { .revision = 0, .config = { .hw = &msm8x74_config } }, + { .revision = 2, .config = { .hw = &msm8x74_config } }, + { .revision = 3, .config = { .hw = &apq8084_config } }, +}; + + +static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev); + +const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) +{ + return cfg_handler->config.hw; +} + +struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler) +{ + return &cfg_handler->config; +} + +int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler) +{ + return cfg_handler->revision; +} + +void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler) +{ + kfree(cfg_handler); +} + +struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, + uint32_t major, uint32_t minor) +{ + struct drm_device *dev = mdp5_kms->dev; + struct platform_device *pdev = dev->platformdev; + struct mdp5_cfg_handler *cfg_handler; + struct mdp5_cfg_platform *pconfig; + int i, ret = 0; + + cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL); + if (unlikely(!cfg_handler)) { + ret = -ENOMEM; + goto fail; + } + + if (major != 1) { + dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n", + 
major, minor); + ret = -ENXIO; + goto fail; + } + + /* only after mdp5_cfg global pointer's init can we access the hw */ + for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) { + if (cfg_handlers[i].revision != minor) + continue; + mdp5_cfg = cfg_handlers[i].config.hw; + + break; + } + if (unlikely(!mdp5_cfg)) { + dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto fail; + } + + cfg_handler->revision = minor; + cfg_handler->config.hw = mdp5_cfg; + + pconfig = mdp5_get_config(pdev); + memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig)); + + DBG("MDP5: %s hw config selected", mdp5_cfg->name); + + return cfg_handler; + +fail: + if (cfg_handler) + mdp5_cfg_destroy(cfg_handler); + + return NULL; +} + +static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) +{ + static struct mdp5_cfg_platform config = {}; +#ifdef CONFIG_OF + /* TODO */ +#endif + config.iommu = iommu_domain_alloc(&platform_bus_type); + + return &config; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h new file mode 100644 index 000000000000..dba4d52cceeb --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MDP5_CFG_H__ +#define __MDP5_CFG_H__ + +#include "msm_drv.h" + +/* + * mdp5_cfg + * + * This module configures the dynamic offsets used by mdp5.xml.h + * (initialized in mdp5_cfg.c) + */ +extern const struct mdp5_cfg_hw *mdp5_cfg; + +#define MAX_CTL 8 +#define MAX_BASES 8 +#define MAX_SMP_BLOCKS 44 +#define MAX_CLIENTS 32 + +typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); + +#define MDP5_SUB_BLOCK_DEFINITION \ + int count; \ + uint32_t base[MAX_BASES] + +struct mdp5_sub_block { + MDP5_SUB_BLOCK_DEFINITION; +}; + +struct mdp5_lm_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t nb_stages; /* number of stages per blender */ +}; + +struct mdp5_smp_block { + int mmb_count; /* number of SMP MMBs */ + int mmb_size; /* MMB: size in bytes */ + mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ + int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ +}; + +struct mdp5_cfg_hw { + char *name; + + struct mdp5_smp_block smp; + struct mdp5_sub_block ctl; + struct mdp5_sub_block pipe_vig; + struct mdp5_sub_block pipe_rgb; + struct mdp5_sub_block pipe_dma; + struct mdp5_lm_block lm; + struct mdp5_sub_block dspp; + struct mdp5_sub_block ad; + struct mdp5_sub_block intf; + + uint32_t max_clk; +}; + +/* platform config data (ie. 
from DT, or pdata) */ +struct mdp5_cfg_platform { + struct iommu_domain *iommu; +}; + +struct mdp5_cfg { + const struct mdp5_cfg_hw *hw; + struct mdp5_cfg_platform platform; +}; + +struct mdp5_kms; +struct mdp5_cfg_handler; + +const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd); +struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd); +int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd); + +struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, + uint32_t major, uint32_t minor); +void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); + +#endif /* __MDP5_CFG_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index b7b32c47fd71..0598bdea4ff4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -17,41 +18,35 @@ #include "mdp5_kms.h" +#include <linux/sort.h> #include <drm/drm_mode.h> #include "drm_crtc.h" #include "drm_crtc_helper.h" #include "drm_flip_work.h" +#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ + struct mdp5_crtc { struct drm_crtc base; char name[8]; int id; bool enabled; - /* which mixer/encoder we route output to: */ - int mixer; + /* layer mixer used for this CRTC (+ its lock): */ +#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id) + int lm; + spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */ + + /* CTL used for this CRTC: */ + struct mdp5_ctl *ctl; /* if there is a pending flip, these will be non-null: */ struct drm_pending_vblank_event *event; - struct msm_fence_cb pageflip_cb; #define PENDING_CURSOR 0x1 #define PENDING_FLIP 0x2 atomic_t pending; - /* the fb that we logically (from PoV of KMS API) hold a ref - * to. 
Which we may not yet be scanning out (we may still - * be scanning out previous in case of page_flip while waiting - * for gpu rendering to complete: - */ - struct drm_framebuffer *fb; - - /* the fb that we currently hold a scanout ref to: */ - struct drm_framebuffer *scanout_fb; - - /* for unref'ing framebuffers after scanout completes: */ - struct drm_flip_work unref_fb_work; - struct mdp_irq vblank; struct mdp_irq err; }; @@ -71,66 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending) mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); } -static void crtc_flush(struct drm_crtc *crtc) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - int id = mdp5_crtc->id; - struct drm_plane *plane; - uint32_t flush = 0; - - for_each_plane_on_crtc(crtc, plane) { - enum mdp5_pipe pipe = mdp5_plane_pipe(plane); - flush |= pipe2flush(pipe); - } - - flush |= mixer2flush(mdp5_crtc->id); - flush |= MDP5_CTL_FLUSH_CTL; +#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm) - DBG("%s: flush=%08x", mdp5_crtc->name, flush); - - mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush); -} - -static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) +static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct drm_framebuffer *old_fb = mdp5_crtc->fb; - - /* grab reference to incoming scanout fb: */ - drm_framebuffer_reference(new_fb); - mdp5_crtc->base.primary->fb = new_fb; - mdp5_crtc->fb = new_fb; - if (old_fb) - drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb); + DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask); + mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); } -/* unlike update_fb(), take a ref to the new scanout fb *before* updating - * plane, then call this. Needed to ensure we don't unref the buffer that - * is actually still being scanned out. - * - * Note that this whole thing goes away with atomic.. since we can defer - * calling into driver until rendering is done. +/* + * flush updates, to make sure hw is updated to new scanout fb, + * so that we can safely queue unref to current fb (ie. next + * vblank we know hw is done w/ previous scanout_fb). */ -static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) +static void crtc_flush_all(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct drm_plane *plane; + uint32_t flush_mask = 0; - /* flush updates, to make sure hw is updated to new scanout fb, - * so that we can safely queue unref to current fb (ie. next - * vblank we know hw is done w/ previous scanout_fb). 
- */ - crtc_flush(crtc); - - if (mdp5_crtc->scanout_fb) - drm_flip_work_queue(&mdp5_crtc->unref_fb_work, - mdp5_crtc->scanout_fb); + /* we could have already released CTL in the disable path: */ + if (!mdp5_crtc->ctl) + return; - mdp5_crtc->scanout_fb = fb; + for_each_plane_on_crtc(crtc, plane) { + flush_mask |= mdp5_plane_get_flush(plane); + } + flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); + flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm); - /* enable vblank to complete flip: */ - request_pending(crtc, PENDING_FLIP); + crtc_flush(crtc, flush_mask); } /* if file!=NULL, this is preclose potential cancel-flip path */ @@ -151,6 +118,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) */ if (!file || (event->base.file_priv == file)) { mdp5_crtc->event = NULL; + DBG("%s: send event: %p", mdp5_crtc->name, event); drm_send_vblank_event(dev, mdp5_crtc->id, event); } } @@ -160,38 +128,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) mdp5_plane_complete_flip(plane); } -static void pageflip_cb(struct msm_fence_cb *cb) -{ - struct mdp5_crtc *mdp5_crtc = - container_of(cb, struct mdp5_crtc, pageflip_cb); - struct drm_crtc *crtc = &mdp5_crtc->base; - struct drm_framebuffer *fb = mdp5_crtc->fb; - - if (!fb) - return; - - drm_framebuffer_reference(fb); - mdp5_plane_set_scanout(crtc->primary, fb); - update_scanout(crtc, fb); -} - -static void unref_fb_worker(struct drm_flip_work *work, void *val) -{ - struct mdp5_crtc *mdp5_crtc = - container_of(work, struct mdp5_crtc, unref_fb_work); - struct drm_device *dev = mdp5_crtc->base.dev; - - mutex_lock(&dev->mode_config.mutex); - drm_framebuffer_unreference(val); - mutex_unlock(&dev->mode_config.mutex); -} - static void mdp5_crtc_destroy(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); drm_crtc_cleanup(crtc); - drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work); kfree(mdp5_crtc); } @@ -209,6 +150,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode) mdp5_enable(mdp5_kms); mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); } else { + /* set STAGE_UNUSED for all layers */ + mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); mdp5_disable(mdp5_kms); } @@ -223,54 +166,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc, return true; } +/* + * blend_setup() - blend all the planes of a CRTC + * + * When border is enabled, the border color will ALWAYS be the base layer. + * Therefore, the first plane (private RGB pipe) will start at STAGE0. + * If disabled, the first plane starts at STAGE_BASE. + * + * Note: + * Border is not enabled here because the private plane is exactly + * the CRTC resolution. 
+ */ static void blend_setup(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - int id = mdp5_crtc->id; + struct drm_plane *plane; + const struct mdp5_cfg_hw *hw_cfg; + uint32_t lm = mdp5_crtc->lm, blend_cfg = 0; + unsigned long flags; +#define blender(stage) ((stage) - STAGE_BASE) - /* - * Hard-coded setup for now until I figure out how the - * layer-mixer works - */ + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* LM[id]: */ - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id), - MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0), - MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | - MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) | - MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff); - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00); - - /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but - * we want to be setting CTL[m].LAYER[n]. Not sure what the - * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is - * used when chaining up mixers for high resolution displays? - */ + spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); + + /* ctl could be released already when we are shutting down: */ + if (!mdp5_crtc->ctl) + goto out; - /* CTL[id]: */ - mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0), - MDP5_CTL_LAYER_REG_RGB0(STAGE0) | - MDP5_CTL_LAYER_REG_BORDER_COLOR); - mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0); - mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0); - mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0); - mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0); + for_each_plane_on_crtc(crtc, plane) { + enum mdp_mixer_stage_id stage = + to_mdp5_plane_state(plane->state)->stage; + + /* + * Note: This cannot happen with current implementation but + * we need to check this condition once z property is added + */ + BUG_ON(stage > hw_cfg->lm.nb_stages); + + /* LM */ + mdp5_write(mdp5_kms, + REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)), + MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | + MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, + blender(stage)), 0xff); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, + blender(stage)), 0x00); + /* CTL */ + blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage); + DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name, + pipe2name(mdp5_plane_pipe(plane)), stage); + } + + DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg); + mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg); + +out: + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); } -static int mdp5_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) +static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - int ret; + unsigned long flags; + struct drm_display_mode *mode; - mode = adjusted_mode; + if (WARN_ON(!crtc->state)) + return; + + mode = &crtc->state->adjusted_mode; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", mdp5_crtc->name, mode->base.id, mode->name, @@ -281,28 +248,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - /* grab extra ref for update_scanout() */ - drm_framebuffer_reference(crtc->primary->fb); - 
- ret = mdp5_plane_mode_set(crtc->primary, crtc, crtc->primary->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, mode->vdisplay << 16); - if (ret) { - drm_framebuffer_unreference(crtc->primary->fb); - dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", - mdp5_crtc->name, ret); - return ret; - } - - mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id), + spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm), MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); - - update_fb(crtc, crtc->primary->fb); - update_scanout(crtc, crtc->primary->fb); - - return 0; + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); } static void mdp5_crtc_prepare(struct drm_crtc *crtc) @@ -316,65 +266,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc) static void mdp5_crtc_commit(struct drm_crtc *crtc) { + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + DBG("%s", mdp5_crtc->name); mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON); - crtc_flush(crtc); + crtc_flush_all(crtc); /* drop the ref to mdp clk's that we got in prepare: */ mdp5_disable(get_kms(crtc)); } -static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static void mdp5_crtc_load_lut(struct drm_crtc *crtc) +{ +} + +struct plane_state { + struct drm_plane *plane; + struct mdp5_plane_state *state; +}; + +static int pstate_cmp(const void *a, const void *b) +{ + struct plane_state *pa = (struct plane_state *)a; + struct plane_state *pb = (struct plane_state *)b; + return pa->state->zpos - pb->state->zpos; +} + +static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - struct drm_plane *plane = crtc->primary; - struct drm_display_mode *mode = &crtc->mode; - int ret; - - /* grab extra ref for update_scanout() */ - drm_framebuffer_reference(crtc->primary->fb); - - ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, mode->vdisplay << 16); - if (ret) { - drm_framebuffer_unreference(crtc->primary->fb); - return ret; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct drm_plane *plane; + struct drm_device *dev = crtc->dev; + struct plane_state pstates[STAGE3 + 1]; + int cnt = 0, i; + + DBG("%s: check", mdp5_crtc->name); + + if (mdp5_crtc->event) { + dev_err(dev->dev, "already pending flip!\n"); + return -EBUSY; } - update_fb(crtc, crtc->primary->fb); - update_scanout(crtc, crtc->primary->fb); + /* request a free CTL, if none is already allocated for this CRTC */ + if (state->enable && !mdp5_crtc->ctl) { + mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc); + if (WARN_ON(!mdp5_crtc->ctl)) + return -EINVAL; + } + + /* verify that there are not too many planes attached to crtc + * and that we don't have conflicting mixer stages: + */ + for_each_pending_plane_on_crtc(state->state, crtc, plane) { + struct drm_plane_state *pstate; + + if (cnt >= ARRAY_SIZE(pstates)) { + dev_err(dev->dev, "too many planes!\n"); + return -EINVAL; + } + + pstate = state->state->plane_states[drm_plane_index(plane)]; + + /* plane might not have changed, in which case take + * current state: + */ + if (!pstate) + pstate = plane->state; + + pstates[cnt].plane = plane; + pstates[cnt].state = to_mdp5_plane_state(pstate); + + cnt++; + } + + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + + for (i = 0; i < cnt; i++) 
{ + pstates[i].state->stage = STAGE_BASE + i; + DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, + pipe2name(mdp5_plane_pipe(pstates[i].plane)), + pstates[i].state->stage); + } return 0; } -static void mdp5_crtc_load_lut(struct drm_crtc *crtc) +static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc) { + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + DBG("%s: begin", mdp5_crtc->name); } -static int mdp5_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *new_fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_gem_object *obj; unsigned long flags; - if (mdp5_crtc->event) { - dev_err(dev->dev, "already pending flip!\n"); - return -EBUSY; - } + DBG("%s: flush", mdp5_crtc->name); - obj = msm_framebuffer_bo(new_fb, 0); + WARN_ON(mdp5_crtc->event); spin_lock_irqsave(&dev->event_lock, flags); - mdp5_crtc->event = event; + mdp5_crtc->event = crtc->state->event; spin_unlock_irqrestore(&dev->event_lock, flags); - update_fb(crtc, new_fb); + blend_setup(crtc); + crtc_flush_all(crtc); + request_pending(crtc, PENDING_FLIP); - return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb); + if (mdp5_crtc->ctl && !crtc->state->enable) { + mdp5_ctl_release(mdp5_crtc->ctl); + mdp5_crtc->ctl = NULL; + } } static int mdp5_crtc_set_property(struct drm_crtc *crtc, @@ -385,27 +389,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, } static const struct drm_crtc_funcs mdp5_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, + .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, - .page_flip = mdp5_crtc_page_flip, + .page_flip = drm_atomic_helper_page_flip, .set_property = mdp5_crtc_set_property, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { .dpms = mdp5_crtc_dpms, .mode_fixup = mdp5_crtc_mode_fixup, - .mode_set = mdp5_crtc_mode_set, + .mode_set_nofb = mdp5_crtc_mode_set_nofb, + .mode_set = drm_helper_crtc_mode_set, + .mode_set_base = drm_helper_crtc_mode_set_base, .prepare = mdp5_crtc_prepare, .commit = mdp5_crtc_commit, - .mode_set_base = mdp5_crtc_mode_set_base, .load_lut = mdp5_crtc_load_lut, + .atomic_check = mdp5_crtc_atomic_check, + .atomic_begin = mdp5_crtc_atomic_begin, + .atomic_flush = mdp5_crtc_atomic_flush, }; static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) { struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); struct drm_crtc *crtc = &mdp5_crtc->base; - struct msm_drm_private *priv = crtc->dev->dev_private; unsigned pending; mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); @@ -414,16 +424,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) if (pending & PENDING_FLIP) { complete_flip(crtc, NULL); - drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq); } } static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) { struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); - struct drm_crtc *crtc = &mdp5_crtc->base; + DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); - crtc_flush(crtc); } uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) @@ -444,10 +452,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, { struct mdp5_crtc *mdp5_crtc = 
to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - static const enum mdp5_intfnum intfnum[] = { - INTF0, INTF1, INTF2, INTF3, - }; + uint32_t flush_mask = 0; uint32_t intf_sel; + unsigned long flags; /* now that we know what irq's we want: */ mdp5_crtc->err.irqmask = intf2err(intf); @@ -457,6 +464,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, if (!mdp5_kms) return; + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); switch (intf) { @@ -481,39 +489,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, break; } - blend_setup(crtc); + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel); + mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); + flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); + flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm); - mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); - mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id), - MDP5_CTL_OP_MODE(MODE_NONE) | - MDP5_CTL_OP_INTF_NUM(intfnum[intf])); - - crtc_flush(crtc); + crtc_flush(crtc, flush_mask); } -static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id, - struct drm_plane *plane) +int mdp5_crtc_get_lm(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - blend_setup(crtc); - if (mdp5_crtc->enabled && (plane != crtc->primary)) - crtc_flush(crtc); -} + if (WARN_ON(!crtc)) + return -EINVAL; -void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane) -{ - set_attach(crtc, mdp5_plane_pipe(plane), plane); -} - -void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane) -{ - /* don't actually detatch our primary plane: */ - if (crtc->primary == plane) - return; - set_attach(crtc, mdp5_plane_pipe(plane), NULL); + return mdp5_crtc->lm; } /* initialize crtc */ @@ -530,6 +524,9 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, crtc = &mdp5_crtc->base; mdp5_crtc->id = id; + mdp5_crtc->lm = GET_LM_ID(id); + + spin_lock_init(&mdp5_crtc->lm_lock); mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; @@ -537,11 +534,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", pipe2name(mdp5_plane_pipe(plane)), id); - drm_flip_work_init(&mdp5_crtc->unref_fb_work, - "unref fb", unref_fb_worker); - - INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); plane->crtc = crtc; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c new file mode 100644 index 000000000000..dea4505ac963 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mdp5_kms.h" +#include "mdp5_ctl.h" + +/* + * CTL - MDP Control Pool Manager + * + * Controls are shared between all CRTCs. + * + * They are intended to be used for data path configuration. + * The top level register programming describes the complete data path for + * a specific data path ID - REG_MDP5_CTL_*(<id>, ...) + * + * Hardware capabilities determine the number of concurrent data paths + * + * In certain use cases (high-resolution dual pipe), one single CTL can be + * shared across multiple CRTCs. + * + * Because the number of CTLs can be less than the number of CRTCs, + * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is + * requested by the client (in mdp5_crtc_mode_set()). + */ + +struct mdp5_ctl { + struct mdp5_ctl_manager *ctlm; + + u32 id; + + /* whether this CTL has been allocated or not: */ + bool busy; + + /* memory output connection (@see mdp5_ctl_mode): */ + u32 mode; + + /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ + spinlock_t hw_lock; + u32 reg_offset; + + /* flush mask used to commit CTL registers */ + u32 flush_mask; + + bool cursor_on; + + struct drm_crtc *crtc; +}; + +struct mdp5_ctl_manager { + struct drm_device *dev; + + /* number of CTL / Layer Mixers in this hw config: */ + u32 nlm; + u32 nctl; + + /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ + spinlock_t pool_lock; + struct mdp5_ctl ctls[MAX_CTL]; +}; + +static inline +struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr) +{ + struct msm_drm_private *priv = ctl_mgr->dev->dev_private; + + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static inline +void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + + (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ + mdp5_write(mdp5_kms, reg, data); +} + +static inline +u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + + (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ + return mdp5_read(mdp5_kms, reg); +} + + +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf) +{ + unsigned long flags; + static const enum mdp5_intfnum intfnum[] = { + INTF0, INTF1, INTF2, INTF3, + }; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), + MDP5_CTL_OP_MODE(ctl->mode) | + MDP5_CTL_OP_INTF_NUM(intfnum[intf])); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + return 0; +} + +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + u32 blend_cfg; + int lm; + + lm = mdp5_crtc_get_lm(ctl->crtc); + if (unlikely(WARN_ON(lm < 0))) { + dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", + ctl->id, lm); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->hw_lock, flags); + + blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm)); + + if (enable) + blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; + else + blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; + + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + ctl->cursor_on = enable; + + return 0; +} + + +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) +{ + unsigned long flags; + + if (ctl->cursor_on) + blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; + else + blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + 
spin_unlock_irqrestore(&ctl->hw_lock, flags); + + return 0; +} + +int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + + if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) { + int lm = mdp5_crtc_get_lm(ctl->crtc); + + if (unlikely(WARN_ON(lm < 0))) { + dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", + ctl->id, lm); + return -EINVAL; + } + + /* for current targets, cursor bit is the same as LM bit */ + flush_mask |= mdp_ctl_flush_mask_lm(lm); + } + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + return 0; +} + +u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl) +{ + return ctl->flush_mask; +} + +void mdp5_ctl_release(struct mdp5_ctl *ctl) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + + if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) { + dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)", + ctl->id, ctl->busy); + return; + } + + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + ctl->busy = false; + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + + DBG("CTL %d released", ctl->id); +} + +/* + * mdp5_ctl_request() - CTL dynamic allocation + * + * Note: Current implementation considers that we can only have one CRTC per CTL + * + * @return first free CTL + */ +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, + struct drm_crtc *crtc) +{ + struct mdp5_ctl *ctl = NULL; + unsigned long flags; + int c; + + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + + for (c = 0; c < ctl_mgr->nctl; c++) + if (!ctl_mgr->ctls[c].busy) + break; + + if (unlikely(c >= ctl_mgr->nctl)) { + dev_err(ctl_mgr->dev->dev, "No more CTL available!"); + goto unlock; + } + + ctl = &ctl_mgr->ctls[c]; + + ctl->crtc = crtc; + ctl->busy = true; + DBG("CTL %d allocated", ctl->id); + +unlock: + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + return ctl; +} + +void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) +{ + unsigned long flags; + int c; + + for (c = 0; c < ctl_mgr->nctl; c++) { + struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + } +} + +void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) +{ + kfree(ctl_mgr); +} + +struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, + void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg) +{ + struct mdp5_ctl_manager *ctl_mgr; + const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl; + unsigned long flags; + int c, ret; + + ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); + if (!ctl_mgr) { + dev_err(dev->dev, "failed to allocate CTL manager\n"); + ret = -ENOMEM; + goto fail; + } + + if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) { + dev_err(dev->dev, "Increase static pool size to at least %d\n", + ctl_cfg->count); + ret = -ENOSPC; + goto fail; + } + + /* initialize the CTL manager: */ + ctl_mgr->dev = dev; + ctl_mgr->nlm = hw_cfg->lm.count; + ctl_mgr->nctl = ctl_cfg->count; + spin_lock_init(&ctl_mgr->pool_lock); + + /* initialize each CTL of the pool: */ + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + for (c = 0; c < ctl_mgr->nctl; c++) { + struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; + + if (WARN_ON(!ctl_cfg->base[c])) { + dev_err(dev->dev, "CTL_%d: base is null!\n", c); + ret = -EINVAL; + goto fail; + } + ctl->ctlm = ctl_mgr; + ctl->id = c; + ctl->mode = MODE_NONE; + ctl->reg_offset 
= ctl_cfg->base[c]; + ctl->flush_mask = MDP5_CTL_FLUSH_CTL; + ctl->busy = false; + spin_lock_init(&ctl->hw_lock); + } + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + DBG("Pool of %d CTLs created.", ctl_mgr->nctl); + + return ctl_mgr; + +fail: + if (ctl_mgr) + mdp5_ctlm_destroy(ctl_mgr); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h new file mode 100644 index 000000000000..1018519b6af2 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MDP5_CTL_H__ +#define __MDP5_CTL_H__ + +#include "msm_drv.h" + +/* + * CTL Manager prototypes: + * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler, + * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions. + */ +struct mdp5_ctl_manager; +struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, + void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg); +void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); +void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); + +/* + * CTL prototypes: + * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, + * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. + */ +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); + +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf); + +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable); + +/* @blend_cfg: see LM blender config definition below */ +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg); + +/* @flush_mask: see CTL flush masks definitions below */ +int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); +u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl); + +void mdp5_ctl_release(struct mdp5_ctl *ctl); + +/* + * blend_cfg (LM blender config): + * + * The function below allows the caller of mdp5_ctl_blend() to specify how pipes + * are being blended according to their stage (z-order), through @blend_cfg arg. + */ +static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); + case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); + case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); + case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); + case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); + case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); + case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); + case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); + case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); + case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + default: return 0; + } +} + +/* + * flush_mask (CTL flush masks): + * + * The following functions allow each DRM entity to get and store + * their own flush mask. 
+ * Once stored, these masks will then be accessed through each DRM's + * interface and used by the caller of mdp5_ctl_commit() to specify + * which block(s) need to be flushed through @flush_mask parameter. + */ + +#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000 + +static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id) +{ + /* TODO: use id once multiple cursor support is present */ + (void)cursor_id; + + return MDP5_CTL_FLUSH_CURSOR_DUMMY; +} + +static inline u32 mdp_ctl_flush_mask_lm(int lm) +{ + switch (lm) { + case 0: return MDP5_CTL_FLUSH_LM0; + case 1: return MDP5_CTL_FLUSH_LM1; + case 2: return MDP5_CTL_FLUSH_LM2; + case 5: return MDP5_CTL_FLUSH_LM5; + default: return 0; + } +} + +static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; + case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; + case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; + case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; + case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; + case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; + case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; + case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; + case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; + case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; + default: return 0; + } +} + +#endif /* __MDP5_CTL_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index edec7bfaa952..0254bfdeb92f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -24,6 +24,7 @@ struct mdp5_encoder { struct drm_encoder base; int intf; enum mdp5_intf intf_id; + spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ bool enabled; uint32_t bsc; }; @@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode) struct mdp5_kms *mdp5_kms = get_kms(encoder); int intf = mdp5_encoder->intf; bool enabled = (mode == DRM_MODE_DPMS_ON); + unsigned long flags; DBG("mode=%d", mode); @@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode) if (enabled) { bs_set(mdp5_encoder, 1); + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); } else { + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); + bs_set(mdp5_encoder, 0); } @@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, uint32_t display_v_start, display_v_end; uint32_t hsync_start_x, hsync_end_x; uint32_t format; + unsigned long flags; mode = adjusted_mode; @@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); @@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */ + + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); } static void mdp5_encoder_prepare(struct drm_encoder *encoder) @@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, mdp5_encoder->intf_id = intf_id; encoder = &mdp5_encoder->base; + spin_lock_init(&mdp5_encoder->intf_lock); + drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 812c59bbaf7f..70ac81edd40f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -15,6 +15,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/irqdomain.h> +#include <linux/irq.h> #include "msm_drv.h" #include "mdp5_kms.h" @@ -82,18 +84,23 @@ irqreturn_t mdp5_irq(struct msm_kms *kms) { struct mdp_kms *mdp_kms = to_mdp_kms(kms); struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); - struct msm_drm_private *priv = mdp5_kms->dev->dev_private; uint32_t intr; intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS); VERB("intr=%08x", intr); - if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) + if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) { mdp5_irq_mdp(mdp_kms); + intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP; + } - if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI) - hdmi_irq(0, priv->hdmi); + while (intr) { + irq_hw_number_t hwirq = fls(intr) - 1; + generic_handle_irq(irq_find_mapping( + mdp5_kms->irqcontroller.domain, hwirq)); + intr &= ~(1 << hwirq); + } return IRQ_HANDLED; } @@ -110,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), false); } + +/* + * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc) + * can register to get their irq's delivered + */ + +#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \ + MDP5_HW_INTR_STATUS_INTR_DSI1 | \ + MDP5_HW_INTR_STATUS_INTR_HDMI | \ + MDP5_HW_INTR_STATUS_INTR_EDP) + +static void mdp5_hw_mask_irq(struct irq_data *irqd) +{ + struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd); + smp_mb__before_atomic(); + clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static void mdp5_hw_unmask_irq(struct irq_data *irqd) +{ + struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd); + smp_mb__before_atomic(); + set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask); + smp_mb__after_atomic(); +} + +static struct irq_chip mdp5_hw_irq_chip = { + .name = "mdp5", + .irq_mask = mdp5_hw_mask_irq, + .irq_unmask = mdp5_hw_unmask_irq, +}; + +static int mdp5_hw_irqdomain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) +{ + struct mdp5_kms *mdp5_kms = d->host_data; + + if (!(VALID_IRQS & (1 << hwirq))) + return -EPERM; + + irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq); + irq_set_chip_data(irq, mdp5_kms); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +static struct irq_domain_ops mdp5_hw_irqdomain_ops = { + .map = mdp5_hw_irqdomain_map, + .xlate = irq_domain_xlate_onecell, +}; + + +int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms) +{ + struct device *dev = mdp5_kms->dev->dev; + struct irq_domain *d; + + d = irq_domain_add_linear(dev->of_node, 32, + &mdp5_hw_irqdomain_ops, mdp5_kms); + if (!d) { + dev_err(dev, "mdp5 irq domain add failed\n"); + return -ENXIO; + } + + mdp5_kms->irqcontroller.enabled_mask = 0; + mdp5_kms->irqcontroller.domain = d; + + return 0; +} + +void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms) +{ + if (mdp5_kms->irqcontroller.domain) { + irq_domain_remove(mdp5_kms->irqcontroller.domain); + mdp5_kms->irqcontroller.domain = NULL; + } +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index ce0308124a72..a11f1b80c488 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -24,145 +25,11 @@ static const char *iommu_ports[] = { "mdp_0", }; -static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); - -const struct mdp5_config *mdp5_cfg; - -static const struct mdp5_config msm8x74_config = { - .name = "msm8x74", - .ctl = { - .count = 5, - .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, - }, - .pipe_vig = { - .count = 3, - .base = { 0x01200, 0x01600, 0x01a00 }, - }, - .pipe_rgb = { - .count = 3, - .base = { 0x01e00, 0x02200, 0x02600 }, - }, - .pipe_dma = { - .count = 2, - .base = { 0x02a00, 0x02e00 }, - }, - .lm = { - .count = 5, - .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, - }, - .dspp = { - .count = 3, - .base = { 0x04600, 0x04a00, 0x04e00 }, - }, - .ad = { - .count = 2, - .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ - }, - .intf = { - .count = 4, - .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, - }, -}; - -static const struct mdp5_config apq8084_config = { - .name = "apq8084", - .ctl = { - .count = 5, - .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, - }, - .pipe_vig = { - .count = 4, - .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, - }, - .pipe_rgb = { - .count = 4, - .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, - }, - .pipe_dma = { - .count = 2, - .base = { 0x03200, 0x03600 }, - }, - .lm = { - .count = 6, - .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, - }, - .dspp = { - .count = 4, - .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 }, - - }, - .ad = { - .count = 3, - .base = { 0x13500, 0x13700, 0x13900 }, - }, - .intf = { - .count = 5, - .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, - }, -}; - -struct mdp5_config_entry { - int revision; - const struct mdp5_config *config; -}; - -static const struct mdp5_config_entry mdp5_configs[] = { - { .revision = 0, .config = &msm8x74_config }, - { .revision = 2, .config = &msm8x74_config }, - { .revision = 3, .config = &apq8084_config }, -}; - -static int mdp5_select_hw_cfg(struct msm_kms *kms) -{ - struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct drm_device *dev = mdp5_kms->dev; - uint32_t version, major, minor; - int i, ret = 0; - - mdp5_enable(mdp5_kms); - version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION); - mdp5_disable(mdp5_kms); - - major = FIELD(version, MDP5_MDP_VERSION_MAJOR); - minor = FIELD(version, MDP5_MDP_VERSION_MINOR); - - DBG("found MDP5 version v%d.%d", major, minor); - - if (major != 1) { - dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n", - major, minor); - ret = -ENXIO; - goto out; - } - - mdp5_kms->rev = minor; - - /* only after mdp5_cfg global pointer's init can we access the hw */ - for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) { - if (mdp5_configs[i].revision != minor) - continue; - mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config; - break; - } - if (unlikely(!mdp5_kms->hw_cfg)) { - dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n", - major, minor); - ret = -ENXIO; - goto out; - } - - DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name); - - return 0; -out: - return ret; -} - static int mdp5_hw_init(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct drm_device *dev = mdp5_kms->dev; - int i; + unsigned long flags; pm_runtime_get_sync(dev->dev); @@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms) * care. 
*/ + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); - for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++) - mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0); + mdp5_ctlm_hw_reset(mdp5_kms->ctlm); pm_runtime_put_sync(dev->dev); @@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms) struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct msm_mmu *mmu = mdp5_kms->mmu; + mdp5_irq_domain_fini(mdp5_kms); + if (mmu) { mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); mmu->funcs->destroy(mmu); } + + if (mdp5_kms->ctlm) + mdp5_ctlm_destroy(mdp5_kms->ctlm); + if (mdp5_kms->smp) + mdp5_smp_destroy(mdp5_kms->smp); + if (mdp5_kms->cfg) + mdp5_cfg_destroy(mdp5_kms->cfg); + kfree(mdp5_kms); } @@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) static const enum mdp5_pipe crtcs[] = { SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, }; + static const enum mdp5_pipe pub_planes[] = { + SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, + }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; struct drm_encoder *encoder; + const struct mdp5_cfg_hw *hw_cfg; int i, ret; - /* construct CRTCs: */ - for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) { + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* register our interrupt-controller for hdmi/eDP/dsi/etc + * to use for irqs routed through mdp: + */ + ret = mdp5_irq_domain_init(mdp5_kms); + if (ret) + goto fail; + + /* construct CRTCs and their private planes: */ + for (i = 0; i < hw_cfg->pipe_rgb.count; i++) { struct drm_plane *plane; struct drm_crtc *crtc; - plane = mdp5_plane_init(dev, crtcs[i], true); + plane = mdp5_plane_init(dev, crtcs[i], true, + hw_cfg->pipe_rgb.base[i]); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct plane for %s (%d)\n", @@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) priv->crtcs[priv->num_crtcs++] = crtc; } + /* Construct public planes: */ + for (i = 0; i < hw_cfg->pipe_vig.count; i++) { + struct drm_plane *plane; + + plane = mdp5_plane_init(dev, pub_planes[i], false, + hw_cfg->pipe_vig.base[i]); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + dev_err(dev->dev, "failed to construct %s plane: %d\n", + pipe2name(pub_planes[i]), ret); + goto fail; + } + } + /* Construct encoder for HDMI: */ encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); if (IS_ERR(encoder)) { @@ -338,6 +244,21 @@ fail: return ret; } +static void read_hw_revision(struct mdp5_kms *mdp5_kms, + uint32_t *major, uint32_t *minor) +{ + uint32_t version; + + mdp5_enable(mdp5_kms); + version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION); + mdp5_disable(mdp5_kms); + + *major = FIELD(version, MDP5_MDP_VERSION_MAJOR); + *minor = FIELD(version, MDP5_MDP_VERSION_MINOR); + + DBG("MDP5 version v%d.%d", *major, *minor); +} + static int get_clk(struct platform_device *pdev, struct clk **clkp, const char *name) { @@ -354,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp, struct msm_kms *mdp5_kms_init(struct drm_device *dev) { struct platform_device *pdev = dev->platformdev; - struct mdp5_platform_config *config = mdp5_get_config(pdev); + struct mdp5_cfg *config; struct mdp5_kms *mdp5_kms; struct msm_kms *kms = NULL; struct msm_mmu *mmu; + uint32_t major, minor; int i, ret; mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); @@ -367,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } 
+ spin_lock_init(&mdp5_kms->resource_lock); + mdp_kms_init(&mdp5_kms->base, &kms_funcs); kms = &mdp5_kms->base.base; mdp5_kms->dev = dev; - mdp5_kms->smp_blk_cnt = config->smp_blk_cnt; mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); if (IS_ERR(mdp5_kms->mmio)) { @@ -417,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) if (ret) goto fail; - ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); + /* we need to set a default rate before enabling. Set a safe + * rate first, then figure out hw revision, and then set a + * more optimal rate: + */ + clk_set_rate(mdp5_kms->src_clk, 200000000); - ret = mdp5_select_hw_cfg(kms); - if (ret) + read_hw_revision(mdp5_kms, &major, &minor); + + mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); + if (IS_ERR(mdp5_kms->cfg)) { + ret = PTR_ERR(mdp5_kms->cfg); + mdp5_kms->cfg = NULL; + goto fail; + } + + config = mdp5_cfg_get_config(mdp5_kms->cfg); + + /* TODO: compute core clock rate at runtime */ + clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); + + mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp); + if (IS_ERR(mdp5_kms->smp)) { + ret = PTR_ERR(mdp5_kms->smp); + mdp5_kms->smp = NULL; goto fail; + } + + mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw); + if (IS_ERR(mdp5_kms->ctlm)) { + ret = PTR_ERR(mdp5_kms->ctlm); + mdp5_kms->ctlm = NULL; + goto fail; + } /* make sure things are off before attaching iommu (bootloader could * have left things on, in which case we'll start getting faults if * we don't disable): */ mdp5_enable(mdp5_kms); - for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++) + for (i = 0; i < config->hw->intf.count; i++) mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); mdp5_disable(mdp5_kms); mdelay(16); - if (config->iommu) { - mmu = msm_iommu_new(&pdev->dev, config->iommu); + if (config->platform.iommu) { + mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); dev_err(dev->dev, "failed to init iommu: %d\n", ret); @@ -475,18 +426,3 @@ fail: mdp5_destroy(kms); return ERR_PTR(ret); } - -static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev) -{ - static struct mdp5_platform_config config = {}; -#ifdef CONFIG_OF - /* TODO */ -#endif - config.iommu = iommu_domain_alloc(&platform_bus_type); - /* TODO hard-coded in downstream mdss, but should it be? 
*/ - config.max_clk = 200000000; - /* TODO get from DT: */ - config.smp_blk_cnt = 22; - - return &config; -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index c91101d5ac0f..dd69c77c0d64 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -21,25 +21,9 @@ #include "msm_drv.h" #include "msm_kms.h" #include "mdp/mdp_kms.h" -/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */ -#define MDP5_MAX_BASES 8 -struct mdp5_sub_block { - int count; - uint32_t base[MDP5_MAX_BASES]; -}; -struct mdp5_config { - char *name; - struct mdp5_sub_block ctl; - struct mdp5_sub_block pipe_vig; - struct mdp5_sub_block pipe_rgb; - struct mdp5_sub_block pipe_dma; - struct mdp5_sub_block lm; - struct mdp5_sub_block dspp; - struct mdp5_sub_block ad; - struct mdp5_sub_block intf; -}; -extern const struct mdp5_config *mdp5_cfg; +#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ #include "mdp5.xml.h" +#include "mdp5_ctl.h" #include "mdp5_smp.h" struct mdp5_kms { @@ -47,17 +31,14 @@ struct mdp5_kms { struct drm_device *dev; - int rev; - const struct mdp5_config *hw_cfg; + struct mdp5_cfg_handler *cfg; /* mapper-id used to request GEM buffer mapped for scanout: */ int id; struct msm_mmu *mmu; - /* for tracking smp allocation amongst pipes: */ - mdp5_smp_state_t smp_state; - struct mdp5_client_smp_state smp_client_state[CID_MAX]; - int smp_blk_cnt; + struct mdp5_smp *smp; + struct mdp5_ctl_manager *ctlm; /* io/register spaces: */ void __iomem *mmio, *vbif; @@ -71,16 +52,47 @@ struct mdp5_kms { struct clk *lut_clk; struct clk *vsync_clk; + /* + * lock to protect access to global resources: ie., following register: + * - REG_MDP5_DISP_INTF_SEL + */ + spinlock_t resource_lock; + struct mdp_irq error_handler; + + struct { + volatile unsigned long enabled_mask; + struct irq_domain *domain; + } irqcontroller; }; #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) -/* platform config data (ie. from DT, or pdata) */ -struct mdp5_platform_config { - struct iommu_domain *iommu; - uint32_t max_clk; - int smp_blk_cnt; +struct mdp5_plane_state { + struct drm_plane_state base; + + /* "virtual" zpos.. we calculate actual mixer-stage at runtime + * by sorting the attached planes by zpos and then assigning + * mixer stage lowest to highest. Private planes get default + * zpos of zero, and public planes a unique value that is + * greater than zero. This way, things work out if a naive + * userspace assigns planes to a crtc without setting zpos. 
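+ * (Illustrative example: a private plane left at zpos 0 plus public
+ * planes at zpos 2 and 5 would sort into successive mixer stages,
+ * base stage first.)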
+ */ + int zpos; + + /* the actual mixer stage, calculated in crtc->atomic_check() + * NOTE: this should move to mdp5_crtc_state, when that exists + */ + enum mdp_mixer_stage_id stage; + + /* some additional transactional status to help us know in the + * apply path whether we need to update SMP allocation, and + * whether current update is still pending: + */ + bool mode_changed : 1; + bool pending : 1; }; +#define to_mdp5_plane_state(x) \ + container_of(x, struct mdp5_plane_state, base) static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) { @@ -105,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe) return names[pipe]; } -static inline uint32_t pipe2flush(enum mdp5_pipe pipe) -{ - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; - case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; - case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; - case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; - case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; - case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; - case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; - case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; - case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; - case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; - default: return 0; - } -} - static inline int pipe2nclients(enum mdp5_pipe pipe) { switch (pipe) { @@ -135,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe) } } -static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane) -{ - WARN_ON(plane >= pipe2nclients(pipe)); - switch (pipe) { - case SSPP_VIG0: return CID_VIG0_Y + plane; - case SSPP_VIG1: return CID_VIG1_Y + plane; - case SSPP_VIG2: return CID_VIG2_Y + plane; - case SSPP_RGB0: return CID_RGB0; - case SSPP_RGB1: return CID_RGB1; - case SSPP_RGB2: return CID_RGB2; - case SSPP_DMA0: return CID_DMA0_Y + plane; - case SSPP_DMA1: return CID_DMA1_Y + plane; - case SSPP_VIG3: return CID_VIG3_Y + plane; - case SSPP_RGB3: return CID_RGB3; - default: return CID_UNUSED; - } -} - -static inline uint32_t mixer2flush(int lm) -{ - switch (lm) { - case 0: return MDP5_CTL_FLUSH_LM0; - case 1: return MDP5_CTL_FLUSH_LM1; - case 2: return MDP5_CTL_FLUSH_LM2; - default: return 0; - } -} - static inline uint32_t intf2err(int intf) { switch (intf) { @@ -195,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms); irqreturn_t mdp5_irq(struct msm_kms *kms); int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); +void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); static inline uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, @@ -208,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, void mdp5_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj); -void mdp5_plane_set_scanout(struct drm_plane *plane, - struct drm_framebuffer *fb); -int mdp5_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); +uint32_t mdp5_plane_get_flush(struct drm_plane *plane); void mdp5_plane_complete_flip(struct drm_plane *plane); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane); + enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); uint32_t 
mdp5_crtc_vblank(struct drm_crtc *crtc); +int mdp5_crtc_get_lm(struct drm_crtc *crtc); void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, enum mdp5_intf intf_id); -void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane); -void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_plane *plane, int id); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index f3daec4412ad..533df7caa310 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -17,6 +18,7 @@ #include "mdp5_kms.h" +#define MAX_PLANE 4 struct mdp5_plane { struct drm_plane base; @@ -24,6 +26,11 @@ struct mdp5_plane { enum mdp5_pipe pipe; + spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ + uint32_t reg_offset; + + uint32_t flush_mask; /* used to commit pipe registers */ + uint32_t nformats; uint32_t formats[32]; @@ -31,31 +38,24 @@ struct mdp5_plane { }; #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) +static int mdp5_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +static void set_scanout_locked(struct drm_plane *plane, + struct drm_framebuffer *fb); + static struct mdp5_kms *get_kms(struct drm_plane *plane) { struct msm_drm_private *priv = plane->dev->dev_private; return to_mdp5_kms(to_mdp_kms(priv->kms)); } -static int mdp5_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +static bool plane_enabled(struct drm_plane_state *state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - - mdp5_plane->enabled = true; - - if (plane->fb) - drm_framebuffer_unreference(plane->fb); - - drm_framebuffer_reference(fb); - - return mdp5_plane_mode_set(plane, crtc, fb, - crtc_x, crtc_y, crtc_w, crtc_h, - src_x, src_y, src_w, src_h); + return state->fb && state->crtc; } static int mdp5_plane_disable(struct drm_plane *plane) @@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane) struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); enum mdp5_pipe pipe = mdp5_plane->pipe; - int i; DBG("%s: disable", mdp5_plane->name); - /* update our SMP request to zero (release all our blks): */ - for (i = 0; i < pipe2nclients(pipe); i++) - mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0); - - /* TODO detaching now will cause us not to get the last - * vblank and mdp5_smp_commit().. so other planes will - * still see smp blocks previously allocated to us as - * in-use.. 
- */
- if (plane->crtc)
- mdp5_crtc_detach(plane->crtc, plane);
+ if (mdp5_kms) {
+ /* Release the memory we requested earlier from the SMP: */
+ mdp5_smp_release(mdp5_kms->smp, pipe);
+ }
 return 0;
}
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
static void mdp5_plane_destroy(struct drm_plane *plane)
{
 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct msm_drm_private *priv = plane->dev->dev_private;
-
- if (priv->kms)
- mdp5_plane_disable(plane);
+ drm_plane_helper_disable(plane);
 drm_plane_cleanup(plane);
 kfree(mdp5_plane);
@@ -109,109 +98,185 @@ int mdp5_plane_set_property(struct drm_plane *plane,
 return -EINVAL;
}
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
+
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ mdp5_state->zpos = 0;
+ } else {
+ mdp5_state->zpos = 1 + drm_plane_index(plane);
+ }
+
+ plane->state = &mdp5_state->base;
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
+
+ if (mdp5_state->base.fb)
+ drm_framebuffer_reference(mdp5_state->base.fb);
+
+ mdp5_state->mode_changed = false;
+ mdp5_state->pending = false;
+
+ return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(to_mdp5_plane_state(state));
+}
+
 static const struct drm_plane_funcs mdp5_plane_funcs = {
- .update_plane = mdp5_plane_update,
- .disable_plane = mdp5_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
 .destroy = mdp5_plane_destroy,
 .set_property = mdp5_plane_set_property,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
};
-void mdp5_plane_set_scanout(struct drm_plane *plane,
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
 struct drm_framebuffer *fb)
{
 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
- uint32_t iova[4];
- int i;
-
- for (i = 0; i < nplanes; i++) {
- struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
- msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
- }
- for (; i < 4; i++)
- iova[i] = 0;
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
- MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
- MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
- MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
- MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
-
- plane->fb = fb;
+ DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
-/* NOTE: looks like if horizontal decimation is
used (if we supported that)
- * then the width used to calculate SMP block requirements is the post-
- * decimated width. Ie. SMP buffering sits downstream of decimation (which
- * presumably happens during the dma from scanout buffer).
- */
-static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
- uint32_t nplanes, uint32_t width)
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
- struct drm_device *dev = plane->dev;
 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i, hsub, nlines, nblks, ret;
- hsub = drm_format_horz_chroma_subsampling(format);
+ DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->id);
+}
- /* different if BWC (compressed framebuffer?) enabled: */
- nlines = 2;
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *old_state = plane->state;
- for (i = 0, nblks = 0; i < nplanes; i++) {
- int n, fetch_stride, cpp;
+ DBG("%s: check (%d -> %d)", mdp5_plane->name,
+ plane_enabled(old_state), plane_enabled(state));
- cpp = drm_format_plane_cpp(format, i);
- fetch_stride = width * cpp / (i ? hsub : 1);
+ if (plane_enabled(state) && plane_enabled(old_state)) {
+ /* we cannot change SMP block configuration during scanout: */
+ bool full_modeset = false;
+ if (state->fb->pixel_format != old_state->fb->pixel_format) {
+ DBG("%s: pixel_format change!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (state->src_w != old_state->src_w) {
+ DBG("%s: src_w change!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (to_mdp5_plane_state(old_state)->pending) {
+ DBG("%s: still pending!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (full_modeset) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ crtc_state->mode_changed = true;
+ to_mdp5_plane_state(state)->mode_changed = true;
+ }
+ } else {
+ to_mdp5_plane_state(state)->mode_changed = true;
+ }
- n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+ return 0;
+}
- /* for hw rev v1.00 */
- if (mdp5_kms->rev == 0)
- n = roundup_pow_of_two(n);
+static void mdp5_plane_atomic_update(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *state = plane->state;
- DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
- ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
- if (ret) {
- dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
- n, ret);
- return ret;
- }
+ DBG("%s: update", mdp5_plane->name);
- nblks += n;
+ if (!plane_enabled(state)) {
+ to_mdp5_plane_state(state)->pending = true;
+ mdp5_plane_disable(plane);
+ } else if (to_mdp5_plane_state(state)->mode_changed) {
+ int ret;
+ to_mdp5_plane_state(state)->pending = true;
+ ret = mdp5_plane_mode_set(plane,
+ state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+ set_scanout_locked(plane, state->fb);
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
 }
-
- /* in success case, return total # of blocks allocated: */
- return nblks;
}
-static void
set_fifo_thresholds(struct drm_plane *plane, int nblks)
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+ .prepare_fb = mdp5_plane_prepare_fb,
+ .cleanup_fb = mdp5_plane_cleanup_fb,
+ .atomic_check = mdp5_plane_atomic_check,
+ .atomic_update = mdp5_plane_atomic_update,
+};
+
+static void set_scanout_locked(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 struct mdp5_kms *mdp5_kms = get_kms(plane);
 enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t val;
- /* 1/4 of SMP pool that is being fetched */
- val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ plane->fb = fb;
}
-int mdp5_plane_mode_set(struct drm_plane *plane,
+static int mdp5_plane_mode_set(struct drm_plane *plane,
 struct drm_crtc *crtc, struct drm_framebuffer *fb,
 int crtc_x, int crtc_y,
 unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +290,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 uint32_t nplanes, config = 0;
 uint32_t phasex_step = 0, phasey_step = 0;
 uint32_t hdecm = 0, vdecm = 0;
- int i, nblks;
+ unsigned long flags;
+ int ret;
 nplanes = drm_format_num_planes(fb->pixel_format);
@@ -243,12 +309,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 fb->base.id, src_x, src_y, src_w, src_h,
 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /*
- * Calculate and request required # of smp blocks:
- */
- nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
- if (nblks < 0)
- return nblks;
+ /* Request some memory from the SMP: */
+ ret = mdp5_smp_request(mdp5_kms->smp,
+ mdp5_plane->pipe, fb->pixel_format, src_w);
+ if (ret)
+ return ret;
 /*
 * Currently we update the hw for allocations/requests immediately,
@@ -256,8 +321,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 * would move into atomic->check_plane_state(), while updating the
 * hw would remain here:
 */
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_configure(mdp5_kms->smp, pipe);
 if (src_w != crtc_w) {
 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +333,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 /* TODO calc phasey_step, vdecm */
 }
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+
 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +355,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 MDP5_PIPE_OUT_XY_X(crtc_x) |
 MDP5_PIPE_OUT_XY_Y(crtc_y));
- mdp5_plane_set_scanout(plane, fb);
-
 format = to_mdp_format(msm_framebuffer_format(fb));
 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@
-330,22 +394,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); - set_fifo_thresholds(plane, nblks); + set_scanout_locked(plane, fb); - /* TODO detach from old crtc (if we had more than one) */ - mdp5_crtc_attach(crtc, plane); + spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags); - return 0; + return ret; } void mdp5_plane_complete_flip(struct drm_plane *plane) { struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe; - int i; + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + enum mdp5_pipe pipe = mdp5_plane->pipe; + + DBG("%s: complete flip", mdp5_plane->name); - for (i = 0; i < pipe2nclients(pipe); i++) - mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i)); + mdp5_smp_commit(mdp5_kms->smp, pipe); + + to_mdp5_plane_state(plane->state)->pending = false; } enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) @@ -354,9 +420,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) return mdp5_plane->pipe; } +uint32_t mdp5_plane_get_flush(struct drm_plane *plane) +{ + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + + return mdp5_plane->flush_mask; +} + /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane) + enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; @@ -377,10 +450,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, ARRAY_SIZE(mdp5_plane->formats)); + mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe); + mdp5_plane->reg_offset = reg_offset; + spin_lock_init(&mdp5_plane->pipe_lock); + type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; - drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, + ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, mdp5_plane->formats, mdp5_plane->nformats, type); + if (ret) + goto fail; + + drm_plane_helper_add(plane, &mdp5_plane_helper_funcs); mdp5_plane_install_properties(plane, &plane->base); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 2d0236b963a6..bf551885e019 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -29,8 +30,11 @@ * Based on the size of the attached scanout buffer, a certain # of * blocks must be allocated to that client out of the shared pool. * - * For each block, it can be either free, or pending/in-use by a - * client. The updates happen in three steps: + * In some hw, some blocks are statically allocated for certain pipes + * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). + * + * For each block that can be dynamically allocated, it can be either + * free, or pending/in-use by a client. The updates happen in three steps: * * 1) mdp5_smp_request(): * When plane scanout is setup, calculate required number of @@ -61,21 +65,68 @@ * inuse and pending state of all clients.. 
*/
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ int blk_cnt;
+ int blk_size;
+
+ spinlock_t state_lock;
+ mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+ struct mdp5_client_smp_state client_state[CID_MAX];
+};
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
+ default: return CID_UNUSED;
+ }
+}
 /* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
 enum mdp5_client_id cid, int nblks)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ const struct mdp5_cfg_hw *hw_cfg;
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ int i, ret = 0, avail, cur_nblks, cnt = smp->blk_cnt;
+ int reserved;
 unsigned long flags;
- spin_lock_irqsave(&smp_lock, flags);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ reserved = hw_cfg->smp.reserved[cid];
+
+ spin_lock_irqsave(&smp->state_lock, flags);
- avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ nblks -= reserved;
+ if (reserved)
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+
+ avail = cnt - bitmap_weight(smp->state, cnt);
 if (nblks > avail) {
+ dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
 ret = -ENOSPC;
 goto fail;
 }
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
 if (nblks > cur_nblks) {
 /* grow the existing pending reservation: */
 for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ int blk = find_first_zero_bit(smp->state, cnt);
 set_bit(blk, ps->pending);
- set_bit(blk, mdp5_kms->smp_state);
+ set_bit(blk, smp->state);
 }
 } else {
 /* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
 }
 fail:
- spin_unlock_irqrestore(&smp_lock, flags);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ return ret;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * smp_entries_per_blk) / 4;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
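+ *
+ * Worked example (illustrative numbers): a 1920 pixel wide XRGB8888
+ * plane has cpp=4, so fetch_stride = 1920 * 4 = 7680 bytes; with
+ * nlines=2 and 4096 byte MMBs, that is DIV_ROUND_UP(7680 * 2, 4096),
+ * i.e. 4 blocks for the pipe's single plane.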
+ */
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+ int i, hsub, nplanes, nlines, nblks, ret;
+
+ nplanes = drm_format_num_planes(fmt);
+ hsub = drm_format_horz_chroma_subsampling(fmt);
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(fmt, i);
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+ /* for hw rev v1.00 */
+ if (rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+ ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
+
+ return 0;
}
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ smp_request_block(smp, pipe2client(pipe, i), 0);
+ set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
 enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
- int cnt = mdp5_kms->smp_blk_cnt;
- uint32_t blk, val;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int cnt = smp->blk_cnt;
+ u32 blk, val;
 for_each_set_bit(blk, *assigned, cnt) {
 int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
 }
 /* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ int cnt = smp->blk_cnt;
 mdp5_smp_state_t assigned;
+ int i;
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
- update_smp_state(mdp5_kms, cid, &assigned);
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(smp, cid, &assigned);
+ }
}
 /* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ int cnt = smp->blk_cnt;
 mdp5_smp_state_t released;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ /*
+ * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(smp->state, smp->state, released, cnt);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to
other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
- unsigned long flags;
-
- spin_lock_irqsave(&smp_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
- released, cnt);
- spin_unlock_irqrestore(&smp_lock, flags);
-
- update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ update_smp_state(smp, CID_UNUSED, &released);
+ }
+
+ bitmap_copy(ps->inuse, ps->pending, cnt);
 }
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+ kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ spin_lock_init(&smp->state_lock);
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1dd..e47179f63585 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -20,22 +21,26 @@
#include "msm_drv.h"
-#define MAX_SMP_BLOCKS 22
-#define SMP_BLK_SIZE 4096
-#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
-
-typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
-
struct mdp5_client_smp_state {
 mdp5_smp_state_t inuse;
 mdp5_smp_state_t pending;
};
struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns an SMP @handler,
+ * which is then used to call the other mdp5_smp_*(handler, ...) functions.
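+ *
+ * Illustrative call sequence over a plane's lifetime (error handling
+ * elided; mirrors the usage in mdp5_plane.c):
+ *
+ *	smp = mdp5_smp_init(dev, &hw_cfg->smp);
+ *	mdp5_smp_request(smp, pipe, fb->pixel_format, src_w);
+ *	mdp5_smp_configure(smp, pipe);
+ *	... after vblank: mdp5_smp_commit(smp, pipe);
+ *	... on disable: mdp5_smp_release(smp, pipe);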
+ */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 8cf3361daba3..f0de412e13dc 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -117,7 +117,7 @@ int msm_atomic_commit(struct drm_device *dev,
 if (!plane)
 continue;
- if (plane->state->fb != new_state->fb)
+ if ((plane->state->fb != new_state->fb) && new_state->fb)
 add_fb(c, new_state->fb);
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4b52d752bb6e..136303818436 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -215,7 +215,6 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
struct hdmi;
int hdmi_modeset_init(struct hdmi *hdmi,
 struct drm_device *dev, struct drm_encoder *encoder);
-irqreturn_t hdmi_irq(int irq, void *dev_id);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index f4e42d506ff7..84dec161d836 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -120,6 +120,8 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
{
 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ if (!msm_fb->planes[plane])
+ return 0;
 return msm_gem_iova(msm_fb->planes[plane], id);
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 15a0fec99c70..7fb4876388e7 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -68,6 +68,24 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev);
/* TODO move these helper iterator macro somewhere common: */
#define for_each_plane_on_crtc(_crtc, _plane) \
 list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
- if ((_plane)->crtc == (_crtc))
+ if ((_plane)->state->crtc == (_crtc))
+
+static inline bool
+__plane_will_be_attached_to_crtc(struct drm_atomic_state *state,
+ struct drm_plane *plane, struct drm_crtc *crtc)
+{
+ int idx = drm_plane_index(plane);
+
+ /* if plane is modified in incoming state, use the new state: */
+ if (state->plane_states[idx])
+ return state->plane_states[idx]->crtc == crtc;
+
+ /* otherwise, current state: */
+ return plane->state->crtc == crtc;
+}
+
+#define for_each_pending_plane_on_crtc(_state, _crtc, _plane) \
+ list_for_each_entry((_plane), &(_crtc)->dev->mode_config.plane_list, head) \
+ if (__plane_will_be_attached_to_crtc((_state), (_plane), (_crtc)))
#endif /* __MSM_KMS_H__ */
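The new for_each_pending_plane_on_crtc() iterator walks the crtc's plane list against the incoming atomic state when a plane is part of the update, falling back to the plane's current state otherwise. A minimal sketch of a caller (hypothetical helper, not from this series), counting the planes that would end up attached to a crtc once the state is committed:

static int count_pending_planes(struct drm_atomic_state *state,
		struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	int cnt = 0;

	/* iterate planes that @state would leave attached to @crtc: */
	for_each_pending_plane_on_crtc(state, crtc, plane)
		cnt++;

	return cnt;
}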