Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/cmd.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/cmd.c	165
1 file changed, 84 insertions(+), 81 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index bfe410e8a469..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -54,14 +54,6 @@ enum {
 };
 
 enum {
-	NUM_LONG_LISTS	  = 2,
-	NUM_MED_LISTS	  = 64,
-	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
-				MLX5_CMD_DATA_BLOCK_SIZE,
-	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
-};
-
-enum {
 	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
 	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
 	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
@@ -313,6 +305,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
 	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -414,11 +408,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd = MLX5_DRIVER_SYND;
 		return -EIO;
 
@@ -575,6 +572,12 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
 	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
 	default: return "unknown command opcode";
 	}
 }
@@ -1058,14 +1061,13 @@ static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
 	if (!mailbox)
 		return ERR_PTR(-ENOMEM);
 
-	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
-				      &mailbox->dma);
+	mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags,
+				       &mailbox->dma);
 	if (!mailbox->buf) {
 		mlx5_core_dbg(dev, "failed allocation\n");
 		kfree(mailbox);
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
 	mailbox->next = NULL;
 
 	return mailbox;
@@ -1356,10 +1358,10 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
 {
 	unsigned long flags;
 
-	if (msg->cache) {
-		spin_lock_irqsave(&msg->cache->lock, flags);
-		list_add_tail(&msg->list, &msg->cache->head);
-		spin_unlock_irqrestore(&msg->cache->lock, flags);
+	if (msg->parent) {
+		spin_lock_irqsave(&msg->parent->lock, flags);
+		list_add_tail(&msg->list, &msg->parent->head);
+		spin_unlock_irqrestore(&msg->parent->lock, flags);
 	} else {
 		mlx5_free_cmd_msg(dev, msg);
 	}
@@ -1456,30 +1458,37 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
 				      gfp_t gfp)
 {
 	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
+	struct cmd_msg_cache *ch = NULL;
 	struct mlx5_cmd *cmd = &dev->cmd;
-	struct cache_ent *ent = NULL;
-
-	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
-		ent = &cmd->cache.large;
-	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
-		ent = &cmd->cache.med;
-
-	if (ent) {
-		spin_lock_irq(&ent->lock);
-		if (!list_empty(&ent->head)) {
-			msg = list_entry(ent->head.next, typeof(*msg), list);
-			/* For cached lists, we must explicitly state what is
-			 * the real size
-			 */
-			msg->len = in_size;
-			list_del(&msg->list);
+	int i;
+
+	if (in_size <= 16)
+		goto cache_miss;
+
+	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+		ch = &cmd->cache[i];
+		if (in_size > ch->max_inbox_size)
+			continue;
+		spin_lock_irq(&ch->lock);
+		if (list_empty(&ch->head)) {
+			spin_unlock_irq(&ch->lock);
+			continue;
 		}
-		spin_unlock_irq(&ent->lock);
+		msg = list_entry(ch->head.next, typeof(*msg), list);
+		/* For cached lists, we must explicitly state what is
+		 * the real size
+		 */
+		msg->len = in_size;
+		list_del(&msg->list);
+		spin_unlock_irq(&ch->lock);
+		break;
 	}
 
-	if (IS_ERR(msg))
-		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
+	if (!IS_ERR(msg))
+		return msg;
 
+cache_miss:
+	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 	return msg;
 }
 
@@ -1577,58 +1586,56 @@ EXPORT_SYMBOL(mlx5_cmd_exec_cb);
 
 static void destroy_msg_cache(struct mlx5_core_dev *dev)
 {
-	struct mlx5_cmd *cmd = &dev->cmd;
+	struct cmd_msg_cache *ch;
 	struct mlx5_cmd_msg *msg;
 	struct mlx5_cmd_msg *n;
+	int i;
 
-	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
-		list_del(&msg->list);
-		mlx5_free_cmd_msg(dev, msg);
-	}
-
-	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
-		list_del(&msg->list);
-		mlx5_free_cmd_msg(dev, msg);
+	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+		ch = &dev->cmd.cache[i];
+		list_for_each_entry_safe(msg, n, &ch->head, list) {
+			list_del(&msg->list);
+			mlx5_free_cmd_msg(dev, msg);
+		}
 	}
 }
 
-static int create_msg_cache(struct mlx5_core_dev *dev)
+static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
+	512, 32, 16, 8, 2
+};
+
+static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
+	16 + MLX5_CMD_DATA_BLOCK_SIZE,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
+};
+
+static void create_msg_cache(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
+	struct cmd_msg_cache *ch;
 	struct mlx5_cmd_msg *msg;
-	int err;
 	int i;
-
-	spin_lock_init(&cmd->cache.large.lock);
-	INIT_LIST_HEAD(&cmd->cache.large.head);
-	spin_lock_init(&cmd->cache.med.lock);
-	INIT_LIST_HEAD(&cmd->cache.med.head);
-
-	for (i = 0; i < NUM_LONG_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
-		if (IS_ERR(msg)) {
-			err = PTR_ERR(msg);
-			goto ex_err;
-		}
-		msg->cache = &cmd->cache.large;
-		list_add_tail(&msg->list, &cmd->cache.large.head);
-	}
-
-	for (i = 0; i < NUM_MED_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
-		if (IS_ERR(msg)) {
-			err = PTR_ERR(msg);
-			goto ex_err;
+	int k;
+
+	/* Initialize and fill the caches with initial entries */
+	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
+		ch = &cmd->cache[k];
+		spin_lock_init(&ch->lock);
+		INIT_LIST_HEAD(&ch->head);
+		ch->num_ent = cmd_cache_num_ent[k];
+		ch->max_inbox_size = cmd_cache_ent_size[k];
+		for (i = 0; i < ch->num_ent; i++) {
+			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
+						 ch->max_inbox_size, 0);
+			if (IS_ERR(msg))
+				break;
+			msg->parent = ch;
+			list_add_tail(&msg->list, &ch->head);
 		}
-		msg->cache = &cmd->cache.med;
-		list_add_tail(&msg->list, &cmd->cache.med.head);
 	}
-
-	return 0;
-
-ex_err:
-	destroy_msg_cache(dev);
-	return err;
 }
 
 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
@@ -1721,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}
 
@@ -1751,11 +1758,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 
 	cmd->mode = CMD_MODE_POLLING;
 
-	err = create_msg_cache(dev);
-	if (err) {
-		dev_err(&dev->pdev->dev, "failed to create command cache\n");
-		goto err_free_page;
-	}
+	create_msg_cache(dev);
 
 	set_wqname(dev);
 	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
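
The hunks above replace the old two-bucket (med/large) command-message cache with five size-class caches; alloc_msg() now scans the cmd_cache_ent_size[] table smallest-first and falls back to a fresh allocation on a miss. Below is a minimal user-space C sketch of just that size-class selection: pick_cache() and the sample sizes in main() are hypothetical names for illustration, MLX5_CMD_DATA_BLOCK_SIZE is assumed to be the driver's 512 bytes, and the kernel's locking and free-list handling (the real loop also falls through to the next larger cache when a matching cache's free list is momentarily empty) are deliberately omitted.

/* User-space model of the size-class lookup in the reworked alloc_msg().
 * The size table mirrors cmd_cache_ent_size[] from the diff; everything
 * else (lists, locks, allocation) is stubbed out.
 */
#include <stdio.h>

#define MLX5_CMD_DATA_BLOCK_SIZE 512	/* assumed driver value */
#define MLX5_NUM_COMMAND_CACHES  5

static const unsigned int cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

/* Return the index of the smallest size class whose entries can hold
 * in_size bytes, or -1 for a miss (alloc_msg() then allocates a fresh
 * message via its cache_miss label).
 */
static int pick_cache(int in_size)
{
	int i;

	if (in_size <= 16)	/* tiny inboxes bypass the cache entirely */
		return -1;
	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++)
		if (in_size <= (int)cmd_cache_ent_size[i])
			return i;
	return -1;		/* larger than the biggest class */
}

int main(void)
{
	const int sizes[] = { 8, 100, 600, 5000, 200000, 300000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("in_size=%6d -> cache %d\n", sizes[i],
		       pick_cache(sizes[i]));
	return 0;
}

Under these assumptions, in_size=600 maps to cache 1 and in_size=300000 (larger than the biggest entry, 16 + 512 * 512 bytes) maps to -1, the same fall-through the driver takes at cache_miss. On the creation side, the new create_msg_cache() is correspondingly best-effort: it allocates with __GFP_NOWARN, stops filling a class on failure, and no longer has an error path, since a missing cached entry only means alloc_msg() allocates on demand.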