author    | Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> | 2023-10-18 17:58:12 +0530
committer | Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> | 2023-12-14 11:58:45 +0530
commit    | 62210a26cd4f8ad52683a71c0226dfe85de1144d
tree      | 90a1ccbfe35a9f8d31dab080560217bc66e05293 /drivers/bus
parent    | eff9704f5332a13b08fbdbe0f84059c9e7051d5f
bus: mhi: ep: Use slab allocator where applicable
Use the slab allocator for allocating memory for objects that are used
frequently and are of fixed size. This reduces the overhead associated
with kmalloc().
Suggested-by: Alex Elder <elder@linaro.org>
Link: https://lore.kernel.org/r/20231018122812.47261-1-manivannan.sadhasivam@linaro.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
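For background, the kmem_cache API this patch adopts follows a fixed create/alloc/free/destroy lifecycle: one cache is created per fixed-size object type, and allocations are then served from that cache instead of the general-purpose kmalloc() pools. The sketch below illustrates the pattern in isolation; the `foo` structure and function names are hypothetical and not part of this driver:

```c
#include <linux/slab.h>

/* Hypothetical fixed-size object; one dedicated cache serves all
 * allocations of this type. */
struct foo {
	u32 id;
	u64 payload;
};

static struct kmem_cache *foo_cache;

/* Create the cache once, sized for the object; the allocator can then
 * recycle objects without the per-call sizing work of kmalloc(). */
static int foo_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0, 0, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static struct foo *foo_alloc(void)
{
	/* Zeroed allocation from the cache, analogous to kzalloc() */
	return kmem_cache_zalloc(foo_cache, GFP_KERNEL);
}

static void foo_release(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}

static void foo_exit(void)
{
	/* kmem_cache_destroy() is a no-op on NULL, so this is safe even
	 * if foo_init() never ran. */
	kmem_cache_destroy(foo_cache);
}
```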
Diffstat (limited to 'drivers/bus')
-rw-r--r-- | drivers/bus/mhi/ep/main.c | 66
1 file changed, 49 insertions(+), 17 deletions(-)
```diff
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index e2513f5f47a6..517279600645 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -74,7 +74,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
 	if (!event)
 		return -ENOMEM;
 
@@ -83,7 +83,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
 	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
 
 	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
-	kfree(event);
+	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
 
 	return ret;
 }
@@ -93,7 +93,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
 	if (!event)
 		return -ENOMEM;
 
@@ -101,7 +101,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
 	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
 
 	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
-	kfree(event);
+	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
 
 	return ret;
 }
@@ -111,7 +111,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
 	if (!event)
 		return -ENOMEM;
 
@@ -119,7 +119,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
 	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
 
 	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
-	kfree(event);
+	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
 
 	return ret;
 }
@@ -130,7 +130,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
 	if (!event)
 		return -ENOMEM;
 
@@ -139,7 +139,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
 
 	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
-	kfree(event);
+	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
 
 	return ret;
 }
@@ -451,7 +451,7 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 	} else {
 		/* UL channel */
-		result.buf_addr = kzalloc(len, GFP_KERNEL);
+		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
 		if (!result.buf_addr)
 			return -ENOMEM;
 
@@ -459,7 +459,7 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
 			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
 			if (ret < 0) {
 				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-				kfree(result.buf_addr);
+				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 				return ret;
 			}
 
@@ -471,7 +471,7 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
 			/* Read until the ring becomes empty */
 		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
 
-		kfree(result.buf_addr);
+		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 	}
 
 	return 0;
@@ -780,14 +780,14 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 		if (ret) {
 			dev_err(dev, "Error updating write offset for ring\n");
 			mutex_unlock(&chan->lock);
-			kfree(itr);
+			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
 			continue;
 		}
 
 		/* Sanity check to make sure there are elements in the ring */
 		if (ring->rd_offset == ring->wr_offset) {
 			mutex_unlock(&chan->lock);
-			kfree(itr);
+			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
 			continue;
 		}
 
@@ -799,12 +799,12 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 			dev_err(dev, "Error processing ring for channel (%u): %d\n", ring->ch_id, ret);
 			mutex_unlock(&chan->lock);
-			kfree(itr);
+			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
 			continue;
 		}
 
 		mutex_unlock(&chan->lock);
-		kfree(itr);
+		kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
 	}
 }
@@ -860,7 +860,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon
 			u32 ch_id = ch_idx + i;
 
 			ring = &mhi_cntrl->mhi_chan[ch_id].ring;
-			item = kzalloc(sizeof(*item), GFP_ATOMIC);
+			item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
 			if (!item)
 				return;
 
@@ -1407,6 +1407,29 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 		goto err_free_ch;
 	}
 
+	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
+							sizeof(struct mhi_ring_element), 0,
+							SLAB_CACHE_DMA, NULL);
+	if (!mhi_cntrl->ev_ring_el_cache) {
+		ret = -ENOMEM;
+		goto err_free_cmd;
+	}
+
+	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
+						     SLAB_CACHE_DMA, NULL);
+	if (!mhi_cntrl->tre_buf_cache) {
+		ret = -ENOMEM;
+		goto err_destroy_ev_ring_el_cache;
+	}
+
+	mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
+						       sizeof(struct mhi_ep_ring_item), 0,
+						       0, NULL);
+	if (!mhi_cntrl->ring_item_cache) {
+		ret = -ENOMEM;
+		goto err_destroy_tre_buf_cache;
+	}
+
 	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
 	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
 	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
@@ -1415,7 +1438,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
 	if (!mhi_cntrl->wq) {
 		ret = -ENOMEM;
-		goto err_free_cmd;
+		goto err_destroy_ring_item_cache;
 	}
 
 	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
@@ -1474,6 +1497,12 @@ err_ida_free:
 	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
 err_destroy_wq:
 	destroy_workqueue(mhi_cntrl->wq);
+err_destroy_ring_item_cache:
+	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+err_destroy_tre_buf_cache:
+	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+err_destroy_ev_ring_el_cache:
+	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
 err_free_cmd:
 	kfree(mhi_cntrl->mhi_cmd);
 err_free_ch:
@@ -1495,6 +1524,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
 
 	free_irq(mhi_cntrl->irq, mhi_cntrl);
 
+	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_chan);
```
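A note on the unwind order in mhi_ep_register_controller() above: the three caches are created in sequence, so the error labels must destroy them in reverse order of creation, with each label falling through to the cleanup for everything created earlier. Below is a condensed, hypothetical sketch of that goto-unwind pattern; the names are invented for illustration, and kmem_cache_destroy() being a no-op on NULL keeps the fall-through safe:

```c
#include <linux/slab.h>

/* Hypothetical example: three caches created in sequence, mirroring
 * the structure of mhi_ep_register_controller() above. */
static struct kmem_cache *cache_a, *cache_b, *cache_c;

static int create_caches(void)
{
	int ret = -ENOMEM;

	cache_a = kmem_cache_create("cache_a", 64, 0, 0, NULL);
	if (!cache_a)
		return ret;

	cache_b = kmem_cache_create("cache_b", 128, 0, 0, NULL);
	if (!cache_b)
		goto err_destroy_a;

	cache_c = kmem_cache_create("cache_c", 256, 0, 0, NULL);
	if (!cache_c)
		goto err_destroy_b;

	return 0;

/* Unwind strictly in reverse order of creation: a failure at step N
 * lands on the label that destroys step N-1 and falls through the rest. */
err_destroy_b:
	kmem_cache_destroy(cache_b);
err_destroy_a:
	kmem_cache_destroy(cache_a);
	return ret;
}
```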