-rw-r--r--   drivers/crypto/ux500/cryp/cryp.c        |  4
-rw-r--r--   drivers/crypto/ux500/cryp/cryp_core.c   | 26
-rw-r--r--   drivers/crypto/ux500/hash/hash_core.c   | 33
-rw-r--r--   drivers/dma/ste_dma40.c                 | 18
4 files changed, 40 insertions, 41 deletions
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 3eafa903ebcd..43a0c8a26ab0 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -291,7 +291,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
 			      int cryp_mode)
 {
 	enum cryp_algo_mode algomode;
-	struct cryp_register *src_reg = device_data->base;
+	struct cryp_register __iomem *src_reg = device_data->base;
 	struct cryp_config *config =
 		(struct cryp_config *)device_data->current_ctx;
 
@@ -349,7 +349,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
 void cryp_restore_device_context(struct cryp_device_data *device_data,
 				 struct cryp_device_context *ctx)
 {
-	struct cryp_register *reg = device_data->base;
+	struct cryp_register __iomem *reg = device_data->base;
 	struct cryp_config *config =
 		(struct cryp_config *)device_data->current_ctx;
 
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4f8b11af29a6..0257f6b32642 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -553,10 +553,10 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
 			"(TO_DEVICE)", __func__);
 
-		desc = channel->device->device_prep_slave_sg(channel,
-				ctx->device->dma.sg_src,
-				ctx->device->dma.sg_src_len,
-				direction, DMA_CTRL_ACK, NULL);
+		desc = dmaengine_prep_slave_sg(channel,
+				ctx->device->dma.sg_src,
+				ctx->device->dma.sg_src_len,
+				direction, DMA_CTRL_ACK);
 		break;
 
 	case DMA_FROM_DEVICE:
@@ -577,12 +577,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
 			"(FROM_DEVICE)", __func__);
 
-		desc = channel->device->device_prep_slave_sg(channel,
-				ctx->device->dma.sg_dst,
-				ctx->device->dma.sg_dst_len,
-				direction,
-				DMA_CTRL_ACK |
-				DMA_PREP_INTERRUPT, NULL);
+		desc = dmaengine_prep_slave_sg(channel,
+				ctx->device->dma.sg_dst,
+				ctx->device->dma.sg_dst_len,
+				direction,
+				DMA_CTRL_ACK |
+				DMA_PREP_INTERRUPT);
 
 		desc->callback = cryp_dma_out_callback;
 		desc->callback_param = ctx;
@@ -594,7 +594,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 		return -EFAULT;
 	}
 
-	cookie = desc->tx_submit(desc);
+	cookie = dmaengine_submit(desc);
 	dma_async_issue_pending(channel);
 
 	return 0;
@@ -607,12 +607,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
 
 	chan = ctx->device->dma.chan_mem2cryp;
-	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
 		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
 
 	chan = ctx->device->dma.chan_cryp2mem;
-	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
 		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
 }
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 9ca6fbb5e30d..95490f14ddb8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -180,9 +180,9 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
 	dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
 		"(TO_DEVICE)", __func__);
 
-	desc = channel->device->device_prep_slave_sg(channel,
+	desc = dmaengine_prep_slave_sg(channel,
 			ctx->device->dma.sg, ctx->device->dma.sg_len,
-			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
+			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 	if (!desc) {
 		dev_err(ctx->device->dev,
 			"[%s]: device_prep_slave_sg() failed!", __func__);
@@ -192,7 +192,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
 	desc->callback = hash_dma_callback;
 	desc->callback_param = ctx;
 
-	cookie = desc->tx_submit(desc);
+	cookie = dmaengine_submit(desc);
 	dma_async_issue_pending(channel);
 
 	return 0;
@@ -203,7 +203,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
 	struct dma_chan *chan;
 
 	chan = ctx->device->dma.chan_mem2hash;
-	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
 		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 
@@ -473,12 +473,12 @@ static void hash_hw_write_key(struct hash_device_data *device_data,
 		HASH_SET_DIN(&word, nwords);
 	}
 
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 
 	HASH_SET_DCAL;
 
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 }
 
@@ -661,7 +661,7 @@ static void hash_messagepad(struct hash_device_data *device_data,
 	if (index_bytes)
 		HASH_SET_DIN(message, nwords);
 
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 
 	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
@@ -676,7 +676,7 @@ static void hash_messagepad(struct hash_device_data *device_data,
 		(int)(readl_relaxed(&device_data->base->str) &
 		      HASH_STR_NBLW_MASK));
 
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 }
 
@@ -776,7 +776,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
 	/* HW and SW initializations */
 	/* Note: there is no need to initialize buffer and digest members */
 
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 
 	/*
@@ -792,8 +792,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
 	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
 }
 
-int hash_process_data(
-		struct hash_device_data *device_data,
+static int hash_process_data(struct hash_device_data *device_data,
 		struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
 		int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
 {
@@ -962,7 +961,7 @@ static int hash_dma_final(struct ahash_request *req)
 	wait_for_completion(&ctx->device->dma.complete);
 	hash_dma_done(ctx);
 
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 
 	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
@@ -992,7 +991,7 @@ out:
  * hash_hw_final - The final hash calculation function
  * @req: The hash request for the job.
  */
-int hash_hw_final(struct ahash_request *req)
+static int hash_hw_final(struct ahash_request *req)
 {
 	int ret = 0;
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1060,7 +1059,7 @@ int hash_hw_final(struct ahash_request *req)
 			       req_ctx->state.index);
 	} else {
 		HASH_SET_DCAL;
-		while (device_data->base->str & HASH_STR_DCAL_MASK)
+		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 			cpu_relax();
 	}
 
@@ -1189,7 +1188,7 @@ int hash_resume_state(struct hash_device_data *device_data,
 	temp_cr = device_state->temp_cr;
 	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
 
-	if (device_data->base->cr & HASH_CR_MODE_MASK)
+	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
 		hash_mode = HASH_OPER_MODE_HMAC;
 	else
 		hash_mode = HASH_OPER_MODE_HASH;
@@ -1233,7 +1232,7 @@ int hash_save_state(struct hash_device_data *device_data,
 	 * actually makes sure that there isn't any ongoing calculation in the
 	 * hardware.
 	 */
-	while (device_data->base->str & HASH_STR_DCAL_MASK)
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
 		cpu_relax();
 
 	temp_cr = readl_relaxed(&device_data->base->cr);
@@ -1242,7 +1241,7 @@ int hash_save_state(struct hash_device_data *device_data,
 
 	device_state->din_reg = readl_relaxed(&device_data->base->din);
 
-	if (device_data->base->cr & HASH_CR_MODE_MASK)
+	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
 		hash_mode = HASH_OPER_MODE_HMAC;
 	else
 		hash_mode = HASH_OPER_MODE_HASH;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fa4f9a33a74d..5ab5880d5c90 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -78,7 +78,7 @@ static int dma40_memcpy_channels[] = {
 };
 
 /* Default configuration for physcial memcpy */
-struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
 	.mode = STEDMA40_MODE_PHYSICAL,
 	.dir = DMA_MEM_TO_MEM,
 
@@ -92,7 +92,7 @@ struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
 };
 
 /* Default configuration for logical memcpy */
-struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
 	.mode = STEDMA40_MODE_LOGICAL,
 	.dir = DMA_MEM_TO_MEM,
 
@@ -3537,7 +3537,6 @@ static int __init d40_probe(struct platform_device *pdev)
 {
 	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
 	struct device_node *np = pdev->dev.of_node;
-	int err;
 	int ret = -ENOENT;
 	struct d40_base *base = NULL;
 	struct resource *res = NULL;
@@ -3649,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev)
 		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
 		if (IS_ERR(base->lcpa_regulator)) {
 			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+			ret = PTR_ERR(base->lcpa_regulator);
 			base->lcpa_regulator = NULL;
 			goto failure;
 		}
@@ -3664,13 +3664,13 @@ static int __init d40_probe(struct platform_device *pdev)
 	}
 
 	base->initialized = true;
-	err = d40_dmaengine_init(base, num_reserved_chans);
-	if (err)
+	ret = d40_dmaengine_init(base, num_reserved_chans);
+	if (ret)
 		goto failure;
 
 	base->dev->dma_parms = &base->dma_parms;
-	err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
-	if (err) {
+	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+	if (ret) {
 		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
 		goto failure;
 	}
@@ -3678,8 +3678,8 @@ static int __init d40_probe(struct platform_device *pdev)
 	d40_hw_init(base);
 
 	if (np) {
-		err = of_dma_controller_register(np, d40_xlate, NULL);
-		if (err && err != -ENODEV)
+		ret = of_dma_controller_register(np, d40_xlate, NULL);
+		if (ret)
 			dev_err(&pdev->dev,
 				"could not register of_dma_controller\n");
 	}
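
For context, the conversions above replace open-coded calls through chan->device (device_prep_slave_sg(), tx_submit(), device_control()) with the dmaengine wrapper helpers from <linux/dmaengine.h>. The sketch below shows the resulting call pattern in isolation; it is illustrative only, and the function name example_submit_sg() and its flag choices are assumptions for this sketch, not code from the patch.

/*
 * Minimal sketch of the dmaengine wrapper pattern the patch adopts.
 * example_submit_sg() is a hypothetical helper, not part of the drivers above.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_submit_sg(struct dma_chan *chan, struct scatterlist *sg,
			     unsigned int sg_len,
			     enum dma_transfer_direction dir,
			     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Wrapper around chan->device->device_prep_slave_sg() */
	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, dir,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;

	/* Wrapper around desc->tx_submit() */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Kick the channel so the queued descriptor actually runs */
	dma_async_issue_pending(chan);
	return 0;
}

Teardown mirrors cryp_dma_done()/hash_dma_done(): terminate the channel (dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0) on kernels of this vintage, or the dmaengine_terminate_all() convenience wrapper) and then dma_unmap_sg() the scatterlist.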