author		Michael Schmitz <schmitzmic@gmail.com>	2022-06-30 15:33:02 +1200
committer	Martin K. Petersen <martin.petersen@oracle.com>	2022-07-07 17:01:22 -0400
commit		158da6bcae7a66e631bbec458f35ea3bd0ac5d71
tree		701538f7d4c28875204e3cdf8908843c5f742ef9 /drivers/scsi/gvp11.c
parent		479accbbb8398a31e716d40d4d6ccce089f3de86
scsi: gvp11: Convert m68k WD33C93 drivers to DMA API
Use dma_map_single() for the gvp11 driver (leave the bounce buffer logic
unchanged).
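
For readers new to the DMA API: the streaming-mapping pattern being adopted
looks roughly like the sketch below (illustrative only, not verbatim driver
code; 'dev' stands for the bus device saved in the host data).
dma_map_single() returns a bus address for the DMA engine and performs the
cache maintenance the old virt_to_bus() path had to do by hand:

	/* sketch: map, run the transfer, unmap */
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len,
			      dir_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return 1;	/* no mapping; caller falls back to PIO */
	/* ... program the DMA controller with addr, do the transfer ... */
	dma_unmap_single(dev, addr, len,
			 dir_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);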
Use dma_set_mask_and_coherent() to avoid explicit cache flushes.
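
At probe time the counterpart is a single mask registration (again a
sketch; the 24-bit mask is a made-up example value, the driver derives the
real one from the board's dma_xfer_mask via TO_DMA_MASK()):

	/* sketch: tell the DMA core what the device can address */
	if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24)))
		return -ENODEV;	/* mapping could never succeed */

Once the mask is registered, cache maintenance moves into dma_map_single()
and the explicit cache_clear()/cache_push() calls go away.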
Compile-tested only.
CC: linux-scsi@vger.kernel.org
Link: https://lore.kernel.org/r/6d1d88ee-1cf6-c735-1e6d-bafd2096e322@gmail.com
Link: https://lore.kernel.org/r/20220630033302.3183-4-schmitzmic@gmail.com
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Michael Schmitz <schmitzmic@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
--
Changes from v1:
Arnd Bergmann:
- reorder bounce buffer copy and DMA mapping (see the sketch below)
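
The reordering matters for writes: the data must be copied into the bounce
buffer before that buffer is mapped, since for DMA_TO_DEVICE it is
dma_map_single() that pushes the CPU cache out to memory. Roughly (sketch
only, names as in the patch):

	if (!dir_in)	/* write: fill the bounce buffer first... */
		memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
		       scsi_pointer->this_residual);
	/* ...then map it, which flushes the cache for the device */
	addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
			      wh->dma_bounce_len, DMA_DIR(dir_in));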
Diffstat (limited to 'drivers/scsi/gvp11.c')
-rw-r--r--	drivers/scsi/gvp11.c	95
1 file changed, 77 insertions(+), 18 deletions(-)
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2f6c56aabe1d..e8b7a09eb8c7 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -26,8 +26,12 @@
 struct gvp11_hostdata {
 	struct WD33C93_hostdata wh;
 	struct gvp11_scsiregs *regs;
+	struct device *dev;
 };
 
+#define DMA_DIR(d)	((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define TO_DMA_MASK(m)	((~(m & 0xfffffff0))-1)
+
 static irqreturn_t gvp11_intr(int irq, void *data)
 {
 	struct Scsi_Host *instance = data;
@@ -54,17 +58,33 @@ void gvp11_setup(char *str, int *ints)
 static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
 	struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+	unsigned long len = scsi_pointer->this_residual;
 	struct Scsi_Host *instance = cmd->device->host;
 	struct gvp11_hostdata *hdata = shost_priv(instance);
 	struct WD33C93_hostdata *wh = &hdata->wh;
 	struct gvp11_scsiregs *regs = hdata->regs;
 	unsigned short cntr = GVP11_DMAC_INT_ENABLE;
-	unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+	dma_addr_t addr;
 	int bank_mask;
 	static int scsi_alloc_out_of_range = 0;
 
+	addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+			      len, DMA_DIR(dir_in));
+	if (dma_mapping_error(hdata->dev, addr)) {
+		dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+			 scsi_pointer->ptr);
+		return 1;
+	}
+	scsi_pointer->dma_handle = addr;
+
 	/* use bounce buffer if the physical address is bad */
 	if (addr & wh->dma_xfer_mask) {
+		/* drop useless mapping */
+		dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+				 scsi_pointer->this_residual,
+				 DMA_DIR(dir_in));
+		scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
 		wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
 
 		if (!scsi_alloc_out_of_range) {
@@ -87,10 +107,32 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 			wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
 		}
 
-		/* check if the address of the bounce buffer is OK */
-		addr = virt_to_bus(wh->dma_bounce_buffer);
+		if (!dir_in) {
+			/* copy to bounce buffer for a write */
+			memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+			       scsi_pointer->this_residual);
+		}
+
+		if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+			/* will flush/invalidate cache for us */
+			addr = dma_map_single(hdata->dev,
+					      wh->dma_bounce_buffer,
+					      wh->dma_bounce_len,
+					      DMA_DIR(dir_in));
+			/* can't map buffer; use PIO */
+			if (dma_mapping_error(hdata->dev, addr)) {
+				dev_warn(hdata->dev,
+					 "cannot map bounce buffer %p\n",
+					 wh->dma_bounce_buffer);
+				return 1;
+			}
+		}
 
 		if (addr & wh->dma_xfer_mask) {
+			/* drop useless mapping */
+			dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+					 scsi_pointer->this_residual,
+					 DMA_DIR(dir_in));
 			/* fall back to Chip RAM if address out of range */
 			if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
 				kfree(wh->dma_bounce_buffer);
@@ -108,15 +150,19 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 				return 1;
 			}
 
-			addr = virt_to_bus(wh->dma_bounce_buffer);
+			if (!dir_in) {
+				/* copy to bounce buffer for a write */
+				memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+				       scsi_pointer->this_residual);
+			}
+			/* chip RAM can be mapped to phys. address directly */
+			addr = virt_to_phys(wh->dma_bounce_buffer);
+			/* no need to flush/invalidate cache */
 			wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
 		}
 
+		/* finally, have OK mapping (punted for PIO else) */
+		scsi_pointer->dma_handle = addr;
-		if (!dir_in) {
-			/* copy to bounce buffer for a write */
-			memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
-			       scsi_pointer->this_residual);
-		}
 	}
 
 	/* setup dma direction */
@@ -129,13 +175,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 	/* setup DMA *physical* address */
 	regs->ACR = addr;
 
-	if (dir_in) {
-		/* invalidate any cache */
-		cache_clear(addr, scsi_pointer->this_residual);
-	} else {
-		/* push any dirty cache */
-		cache_push(addr, scsi_pointer->this_residual);
-	}
+	/* no more cache flush here - dma_map_single() takes care */
 
 	bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
 	if (bank_mask)
@@ -161,6 +201,11 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	/* remove write bit from CONTROL bits */
 	regs->CNTR = GVP11_DMAC_INT_ENABLE;
 
+	if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+		dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+				 scsi_pointer->this_residual,
+				 DMA_DIR(wh->dma_dir));
+
 	/* copy from a bounce buffer, if necessary */
 	if (status && wh->dma_bounce_buffer) {
 		if (wh->dma_dir && SCpnt)
@@ -287,6 +332,13 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
 
 	default_dma_xfer_mask = ent->driver_data;
 
+	if (dma_set_mask_and_coherent(&z->dev,
+				      TO_DMA_MASK(default_dma_xfer_mask))) {
+		dev_warn(&z->dev, "cannot use DMA mask %x\n",
+			 TO_DMA_MASK(default_dma_xfer_mask));
+		return -ENODEV;
+	}
+
 	/*
 	 * Rumors state that some GVP ram boards use the same product
 	 * code as the SCSI controllers. Therefore if the board-size
@@ -327,9 +379,16 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
 	wdregs.SCMD = &regs->SCMD;
 
 	hdata = shost_priv(instance);
-	if (gvp11_xfer_mask)
+	if (gvp11_xfer_mask) {
 		hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
-	else
+		if (dma_set_mask_and_coherent(&z->dev,
+					      TO_DMA_MASK(gvp11_xfer_mask))) {
+			dev_warn(&z->dev, "cannot use DMA mask %x\n",
+				 TO_DMA_MASK(gvp11_xfer_mask));
+			error = -ENODEV;
+			goto fail_check_or_alloc;
+		}
+	} else
 		hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
 
 	hdata->wh.no_sync = 0xff;
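
A note on the two helper macros: wh->dma_xfer_mask has bits set where the
DMA engine cannot drive an address line (so a nonzero 'addr & mask' means
"use a bounce buffer"), while the DMA API wants a mask of the bits the
device can drive; TO_DMA_MASK() therefore strips the low flag nibble and
inverts. Worked through with a hypothetical mask value:

	m              = 0xff000001	/* hypothetical: top byte unreachable */
	m & 0xfffffff0 = 0xff000000	/* macro drops the low nibble */
	~0xff000000    = 0x00ffffff	/* bits the device can address */
	0x00ffffff - 1 = 0x00fffffe	/* handed to dma_set_mask_and_coherent() */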