| author | Vinod Koul <vinod.koul@intel.com> | 2016-12-14 09:06:54 +0530 |
| committer | Vinod Koul <vinod.koul@intel.com> | 2016-12-14 09:06:54 +0530 |
| commit | 90644ad7f2c0292b1a2a98d733bd17f449bb9885 (patch) |
| tree | c9d7b6f53f779de5ca00f21be130c405237be068 /drivers/dma |
| parent | 83cb0dcaf11fa045cecad4cc6310f3bfbf0cf87e (diff) |
| parent | 75ff76687cfd9f62ea4a6d3a86679d5be1439a94 (diff) |
Merge branch 'topic/qcom' into for-linus
Diffstat (limited to 'drivers/dma')
| -rw-r--r-- | drivers/dma/qcom/hidma.c | 173 |
| -rw-r--r-- | drivers/dma/qcom/hidma.h | 9 |
| -rw-r--r-- | drivers/dma/qcom/hidma_dbg.c | 4 |
| -rw-r--r-- | drivers/dma/qcom/hidma_ll.c | 176 |
| -rw-r--r-- | drivers/dma/qcom/hidma_mgmt.c | 11 |
5 files changed, 270 insertions, 103 deletions
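The bulk of the change below teaches the HIDMA channel driver to request a block of platform MSIs instead of a single wired interrupt when the hardware (DT `qcom,hidma-1.1`, ACPI `QCOM8062`) advertises support. For readers unfamiliar with the 4.9-era platform-MSI API used in those hunks, here is a minimal sketch of the allocate-then-request pattern; it assumes `CONFIG_GENERIC_MSI_IRQ_DOMAIN`, and the `example_*` names are illustrative, not part of the driver.

```c
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/platform_device.h>

/*
 * Illustrative callback: a real driver programs msg->address_lo/hi and
 * msg->data into its doorbell registers here (see hidma_write_msi_msg()
 * in the diff below).
 */
static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

/* Sketch of the allocate-then-request pattern used by hidma_request_msi(). */
static int example_setup_msi(struct platform_device *pdev, unsigned int nvec,
			     irq_handler_t handler, void *data)
{
	struct msi_desc *desc, *failed = NULL;
	int rc;

	/* allocate nvec MSI vectors from the platform MSI domain */
	rc = platform_msi_domain_alloc_irqs(&pdev->dev, nvec,
					    example_write_msi_msg);
	if (rc)
		return rc;

	/* attach a handler to every allocated vector */
	for_each_msi_entry(desc, &pdev->dev) {
		rc = devm_request_irq(&pdev->dev, desc->irq, handler, 0,
				      "example-msi", data);
		if (rc) {
			failed = desc;
			break;
		}
	}

	if (rc) {
		/* unwind only the vectors that were actually requested */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed)
				break;
			devm_free_irq(&pdev->dev, desc->irq, data);
		}
		platform_msi_domain_free_irqs(&pdev->dev);
	}
	return rc;
}
```

The driver-specific detail in `hidma_request_msi()` below is that the Linux IRQ of MSI index 0 is remembered as `msi_virqbase`, so each per-vector handler can reconstruct its cause bit as `1 << (chirq - msi_virqbase)`, and index 0 is also the vector whose address/data get written into the event-channel doorbell registers.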
```diff
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10a94b5..3c982c96b4b7 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -56,6 +56,7 @@
 #include <linux/irq.h>
 #include <linux/atomic.h>
 #include <linux/pm_runtime.h>
+#include <linux/msi.h>
 
 #include "../dmaengine.h"
 #include "hidma.h"
@@ -70,6 +71,7 @@
 #define HIDMA_ERR_INFO_SW			0xFF
 #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
 #define HIDMA_NR_DEFAULT_DESC			10
+#define HIDMA_MSI_INTS				11
 
 static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
 {
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
 	return hidma_ll_inthandler(chirq, lldev);
 }
 
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+	struct hidma_lldev **lldevp = arg;
+	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+	return hidma_ll_inthandler_msi(chirq, *lldevp,
+				       1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
 static ssize_t hidma_show_values(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
 	return strlen(buf);
 }
 
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
-				    int mode)
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+	device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
 {
 	struct device_attribute *attrs;
 	char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
 	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
 			     GFP_KERNEL);
 	if (!attrs)
-		return -ENOMEM;
+		return NULL;
 
 	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
 	if (!name_copy)
-		return -ENOMEM;
+		return NULL;
 
 	attrs->attr.name = name_copy;
 	attrs->attr.mode = mode;
 	attrs->show = hidma_show_values;
 	sysfs_attr_init(&attrs->attr);
 
-	return device_create_file(dev->ddev.dev, attrs);
+	return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+	if (!dev->chid_attrs)
+		return -ENOMEM;
+
+	return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct device *dev = msi_desc_to_dev(desc);
+	struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+	if (!desc->platform.msi_index) {
+		writel(msg->address_lo, dmadev->dev_evca + 0x118);
+		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+		writel(msg->data, dmadev->dev_evca + 0x120);
+	}
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	struct device *dev = dmadev->ddev.dev;
+	struct msi_desc *desc;
+
+	/* free allocated MSI interrupts above */
+	for_each_msi_entry(desc, dev)
+		devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+	platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+			     struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	int rc;
+	struct msi_desc *desc;
+	struct msi_desc *failed_desc = NULL;
+
+	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+					    hidma_write_msi_msg);
+	if (rc)
+		return rc;
+
+	for_each_msi_entry(desc, &pdev->dev) {
+		if (!desc->platform.msi_index)
+			dmadev->msi_virqbase = desc->irq;
+
+		rc = devm_request_irq(&pdev->dev, desc->irq,
+				      hidma_chirq_handler_msi,
+				      0, "qcom-hidma-msi",
+				      &dmadev->lldev);
+		if (rc) {
+			failed_desc = desc;
+			break;
+		}
+	}
+
+	if (rc) {
+		/* free allocated MSI interrupts above */
+		for_each_msi_entry(desc, &pdev->dev) {
+			if (desc == failed_desc)
+				break;
+			devm_free_irq(&pdev->dev, desc->irq,
+				      &dmadev->lldev);
+		}
+	} else {
+		/* Add callback to free MSIs on teardown */
+		hidma_ll_setup_irq(dmadev->lldev, true);
+
+	}
+	if (rc)
+		dev_warn(&pdev->dev,
+			 "failed to request MSI irq, falling back to wired IRQ\n");
+	return rc;
+#else
+	return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+	const char *of_compat;
+	int ret = -EINVAL;
+
+	if (!adev || acpi_disabled) {
+		ret = device_property_read_string(dev, "compatible",
+						  &of_compat);
+		if (ret)
+			return false;
+
+		ret = strcmp(of_compat, "qcom,hidma-1.1");
+	} else {
+#ifdef CONFIG_ACPI
+		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+	}
+	return ret == 0;
 }
 
 static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@
 	void __iomem *evca;
 	void __iomem *trca;
 	int rc;
+	bool msi;
 
 	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
 	dmadev->ddev.device_terminate_all = hidma_terminate_all;
 	dmadev->ddev.copy_align = 8;
 
+	/*
+	 * Determine the MSI capability of the platform. Old HW doesn't
+	 * support MSI.
+	 */
+	msi = hidma_msi_capable(&pdev->dev);
+
 	device_property_read_u32(&pdev->dev, "desc-count",
 				 &dmadev->nr_descriptors);
 
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
 		goto dmafree;
 	}
 
-	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
-			      "qcom-hidma", dmadev->lldev);
-	if (rc)
-		goto uninit;
+	platform_set_drvdata(pdev, dmadev);
+	if (msi)
+		rc = hidma_request_msi(dmadev, pdev);
+
+	if (!msi || rc) {
+		hidma_ll_setup_irq(dmadev->lldev, false);
+		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+				      0, "qcom-hidma", dmadev->lldev);
+		if (rc)
+			goto uninit;
+	}
 
 	INIT_LIST_HEAD(&dmadev->ddev.channels);
 	rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
 	dmadev->irq = chirq;
 	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
 	hidma_debug_init(dmadev);
-	hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+	hidma_sysfs_init(dmadev);
 	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
-	platform_set_drvdata(pdev, dmadev);
 	pm_runtime_mark_last_busy(dmadev->ddev.dev);
 	pm_runtime_put_autosuspend(dmadev->ddev.dev);
 	return 0;
 
 uninit:
+	if (msi)
+		hidma_free_msis(dmadev);
+
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
 
 	pm_runtime_get_sync(dmadev->ddev.dev);
 	dma_async_device_unregister(&dmadev->ddev);
-	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	if (!dmadev->lldev->msi_support)
+		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	else
+		hidma_free_msis(dmadev);
+
 	tasklet_kill(&dmadev->task);
+	hidma_sysfs_uninit(dmadev);
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 	hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
 #if IS_ENABLED(CONFIG_ACPI)
 static const struct acpi_device_id hidma_acpi_ids[] = {
 	{"QCOM8061"},
+	{"QCOM8062"},
 	{},
 };
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
 #endif
 
 static const struct of_device_id hidma_match[] = {
 	{.compatible = "qcom,hidma-1.0",},
+	{.compatible = "qcom,hidma-1.1",},
 	{},
 };
 MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index e52e20716303..c7d014235c32 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -46,6 +46,7 @@ struct hidma_tre {
 };
 
 struct hidma_lldev {
+	bool msi_support;		/* flag indicating MSI support */
 	bool initialized;		/* initialized flag */
 	u8 trch_state;			/* trch_state of the device */
 	u8 evch_state;			/* evch_state of the device */
@@ -58,7 +59,7 @@ struct hidma_lldev {
 	void __iomem *evca;		/* Event Channel address */
 	struct hidma_tre
 		**pending_tre_list;	/* Pointers to pending TREs */
-	s32 pending_tre_count;		/* Number of TREs pending */
+	atomic_t pending_tre_count;	/* Number of TREs pending */
 
 	void *tre_ring;			/* TRE ring */
 	dma_addr_t tre_dma;		/* TRE ring to be shared with HW */
@@ -114,6 +115,7 @@ struct hidma_dev {
 	int irq;
 	int chidx;
 	u32 nr_descriptors;
+	int msi_virqbase;
 
 	struct hidma_lldev *lldev;
 	void __iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
 	struct dentry *debugfs;
 	struct dentry *stats;
 
+	/* sysfs entry for the channel id */
+	struct device_attribute *chid_attrs;
+
 	/* Task delivering issue_pending */
 	struct tasklet_struct task;
 };
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
 int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
 	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
 int hidma_ll_setup(struct hidma_lldev *lldev);
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
 			void __iomem *trca, void __iomem *evca, u8 chidx);
 int hidma_ll_uninit(struct hidma_lldev *llhndl);
 irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
 				u8 err_code);
 int hidma_debug_init(struct hidma_dev *dmadev);
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index fa827e5ffd68..3bdcb8056a36 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
 	seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
 	seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
 	seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
-	seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+	seq_printf(s, "pending_tre_count=%d\n",
+		   atomic_read(&lldev->pending_tre_count));
 	seq_printf(s, "evca=%p\n", lldev->evca);
 	seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
 	seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
 void hidma_debug_uninit(struct hidma_dev *dmadev)
 {
 	debugfs_remove_recursive(dmadev->debugfs);
-	debugfs_remove_recursive(dmadev->stats);
 }
 
 int hidma_debug_init(struct hidma_dev *dmadev)
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 3224f24c577b..6645bdf0d151 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
 	}
 }
 
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
-				u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+				u8 err_code)
 {
 	struct hidma_tre *tre;
 	unsigned long flags;
+	u32 tre_iterator;
 
 	spin_lock_irqsave(&lldev->lock, flags);
+
+	tre_iterator = lldev->tre_processed_off;
 	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
 	if (!tre) {
 		spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
 	 * Keep track of pending TREs that SW is expecting to receive
 	 * from HW. We got one now. Decrement our counter.
 	 */
-	lldev->pending_tre_count--;
-	if (lldev->pending_tre_count < 0) {
+	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
 		dev_warn(lldev->dev, "tre count mismatch on completion");
-		lldev->pending_tre_count = 0;
+		atomic_set(&lldev->pending_tre_count, 0);
 	}
 
+	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+				 lldev->tre_ring_size);
+	lldev->tre_processed_off = tre_iterator;
 	spin_unlock_irqrestore(&lldev->lock, flags);
 
 	tre->err_info = err_info;
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
 static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 {
 	u32 evre_ring_size = lldev->evre_ring_size;
-	u32 tre_ring_size = lldev->tre_ring_size;
 	u32 err_info, err_code, evre_write_off;
-	u32 tre_iterator, evre_iterator;
+	u32 evre_iterator;
 	u32 num_completed = 0;
 
 	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
-	tre_iterator = lldev->tre_processed_off;
 	evre_iterator = lldev->evre_processed_off;
 
 	if ((evre_write_off > evre_ring_size) ||
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 		err_code =
 		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
 
-		if (hidma_post_completed(lldev, tre_iterator, err_info,
-					 err_code))
+		if (hidma_post_completed(lldev, err_info, err_code))
 			break;
 
-		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
-					 tre_ring_size);
 		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
 					 evre_ring_size);
 
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 		evre_write_off =
 		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
 		num_completed++;
+
+		/*
+		 * An error interrupt might have arrived while we are processing
+		 * the completed interrupt.
+		 */
+		if (!hidma_ll_isenabled(lldev))
+			break;
 	}
 
 	if (num_completed) {
 		u32 evre_read_off = (lldev->evre_processed_off +
 				     HIDMA_EVRE_SIZE * num_completed);
-		u32 tre_read_off = (lldev->tre_processed_off +
-				    HIDMA_TRE_SIZE * num_completed);
-
 		evre_read_off = evre_read_off % evre_ring_size;
-		tre_read_off = tre_read_off % tre_ring_size;
-
 		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
 
 		/* record the last processed tre offset */
-		lldev->tre_processed_off = tre_read_off;
 		lldev->evre_processed_off = evre_read_off;
 	}
 
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
 			       u8 err_code)
 {
-	u32 tre_iterator;
-	u32 tre_ring_size = lldev->tre_ring_size;
-	int num_completed = 0;
-	u32 tre_read_off;
-
-	tre_iterator = lldev->tre_processed_off;
-	while (lldev->pending_tre_count) {
-		if (hidma_post_completed(lldev, tre_iterator, err_info,
-					 err_code))
+	while (atomic_read(&lldev->pending_tre_count)) {
+		if (hidma_post_completed(lldev, err_info, err_code))
 			break;
-		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
-					 tre_ring_size);
-		num_completed++;
 	}
-	tre_read_off = (lldev->tre_processed_off +
-			HIDMA_TRE_SIZE * num_completed);
-
-	tre_read_off = tre_read_off % tre_ring_size;
-
-	/* record the last processed tre offset */
-	lldev->tre_processed_off = tre_read_off;
 }
 
 static int hidma_ll_reset(struct hidma_lldev *lldev)
@@ -412,12 +396,24 @@
  * requests traditionally to the destination, this concept does not apply
  * here for this HW.
  */
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
 {
-	struct hidma_lldev *lldev = arg;
-	u32 status;
-	u32 enable;
-	u32 cause;
+	if (cause & HIDMA_ERR_INT_MASK) {
+		dev_err(lldev->dev, "error 0x%x, disabling...\n",
+				cause);
+
+		/* Clear out pending interrupts */
+		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+		/* No further submissions. */
+		hidma_ll_disable(lldev);
+
+		/* Driver completes the txn and intimates the client.*/
+		hidma_cleanup_pending_tre(lldev, 0xFF,
+					  HIDMA_EVRE_STATUS_ERROR);
+
+		return;
+	}
 
 	/*
 	 * Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 	 * read and write accessors are used for performance reasons due to
 	 * interrupt delivery guarantees. Do not copy this code blindly and
 	 * expect that to work.
+	 *
+	 * Try to consume as many EVREs as possible.
 	 */
+	hidma_handle_tre_completion(lldev);
+
+	/* We consumed TREs or there are pending TREs or EVREs. */
+	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+	u32 status;
+	u32 enable;
+	u32 cause;
+
 	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
 	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
 	cause = status & enable;
 
 	while (cause) {
-		if (cause & HIDMA_ERR_INT_MASK) {
-			dev_err(lldev->dev, "error 0x%x, disabling...\n",
-					cause);
-
-			/* Clear out pending interrupts */
-			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
-			/* No further submissions. */
-			hidma_ll_disable(lldev);
-
-			/* Driver completes the txn and intimates the client.*/
-			hidma_cleanup_pending_tre(lldev, 0xFF,
-						  HIDMA_EVRE_STATUS_ERROR);
-			goto out;
-		}
-
-		/*
-		 * Try to consume as many EVREs as possible.
-		 */
-		hidma_handle_tre_completion(lldev);
-
-		/* We consumed TREs or there are pending TREs or EVREs. */
-		writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+		hidma_ll_int_handler_internal(lldev, cause);
 
 		/*
 		 * Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 		cause = status & enable;
 	}
 
-out:
+	return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+	struct hidma_lldev *lldev = arg;
+
+	hidma_ll_int_handler_internal(lldev, cause);
 	return IRQ_HANDLED;
 }
 
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
 	tre->err_code = 0;
 	tre->err_info = 0;
 	tre->queued = 1;
-	lldev->pending_tre_count++;
+	atomic_inc(&lldev->pending_tre_count);
 	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
 				  % lldev->tre_ring_size;
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 	u32 val;
 	int ret;
 
-	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
-	lldev->evch_state = HIDMA_CH_STATE(val);
-	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
-	lldev->trch_state = HIDMA_CH_STATE(val);
-
-	/* already suspended by this OS */
-	if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
-	    (lldev->evch_state == HIDMA_CH_SUSPENDED))
-		return 0;
-
-	/* already stopped by the manager */
-	if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
-	    (lldev->evch_state == HIDMA_CH_STOPPED))
+	/* The channel needs to be in working state */
+	if (!hidma_ll_isenabled(lldev))
 		return 0;
 
 	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	u32 val;
 	u32 nr_tres = lldev->nr_tres;
 
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_processed_off = 0;
 	lldev->evre_processed_off = 0;
 	lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	writel(HIDMA_EVRE_SIZE * nr_tres,
 			lldev->evca + HIDMA_EVCA_RING_LEN_REG);
 
-	/* support IRQ only for now */
+	/* configure interrupts */
+	hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+	rc = hidma_ll_enable(lldev);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+	u32 val;
+
+	lldev->msi_support = msi;
+
+	/* disable interrupts again after reset */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	/* support IRQ by default */
 	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
 	val &= ~0xF;
-	val |= 0x1;
+	if (!lldev->msi_support)
+		val = val | 0x1;
 	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
 
 	/* clear all pending interrupts and enable them */
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
-	return hidma_ll_enable(lldev);
 }
 
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	tasklet_kill(&lldev->task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_write_offset = 0;
 
 	rc = hidma_ll_reset(lldev);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 82f36e466083..f847d32cc4b5 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
 	{"QCOM8060"},
 	{},
 };
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
 #endif
 
 static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
 			ret = PTR_ERR(new_pdev);
 			goto out;
 		}
+		of_node_get(child);
+		new_pdev->dev.of_node = child;
 		of_dma_configure(&new_pdev->dev, child);
-
+		/*
+		 * It is assumed that calling of_msi_configure is safe on
+		 * platforms with or without MSI support.
+		 */
+		of_msi_configure(&new_pdev->dev, child);
+		of_node_put(child);
 		kfree(res);
 		res = NULL;
 	}
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
 	for_each_matching_node(child, hidma_mgmt_match) {
 		/* device tree based firmware here */
 		hidma_mgmt_of_populate_channels(child);
-		of_node_put(child);
 	}
 #endif
 	platform_driver_register(&hidma_mgmt_driver);
```
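The probe path gates all of this on `hidma_msi_capable()`, which has to work on both DT and ACPI systems; only the new `qcom,hidma-1.1` compatible and the new `QCOM8062` ACPI HID are treated as MSI-capable. A stand-alone sketch of that dual-firmware check follows (the `example_` prefix is mine; the identifiers are the ones added in the diff):

```c
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/string.h>

/* Sketch: report true only for the MSI-capable HIDMA identifiers. */
static bool example_hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *compat;

	if (!adev || acpi_disabled) {
		/* DT case: match the new "qcom,hidma-1.1" compatible */
		if (device_property_read_string(dev, "compatible", &compat))
			return false;
		return !strcmp(compat, "qcom,hidma-1.1");
	}
#ifdef CONFIG_ACPI
	/* ACPI case: the MSI-capable instance is advertised as QCOM8062 */
	return !strcmp(acpi_device_hid(adev), "QCOM8062");
#else
	return false;
#endif
}
```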
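Independent of the MSI plumbing, the hidma_ll.c hunks convert `pending_tre_count` from a plain `s32` to an `atomic_t` and move the TRE-iterator update under the channel lock inside `hidma_post_completed()`, since completions may now arrive on more than one interrupt vector. The decrement-and-clamp idiom used there looks like this in isolation (a sketch, not the driver's exact code):

```c
#include <linux/atomic.h>
#include <linux/printk.h>

/*
 * Illustrative helper mirroring hidma_post_completed(): retire one
 * outstanding descriptor, warning and clamping to zero if a completion
 * arrives with nothing pending.
 */
static void example_retire_one(atomic_t *pending)
{
	if (atomic_dec_return(pending) < 0) {
		pr_warn("tre count mismatch on completion\n");
		atomic_set(pending, 0);
	}
}
```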