Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/char/tpm/st33zp24/i2c.c      |   3
-rw-r--r-- | drivers/char/tpm/st33zp24/spi.c      |   3
-rw-r--r-- | drivers/char/tpm/tpm-interface.c     | 118
-rw-r--r-- | drivers/char/tpm/tpm-sysfs.c         |   6
-rw-r--r-- | drivers/char/tpm/tpm.h               |  22
-rw-r--r-- | drivers/char/tpm/tpm2-cmd.c          |   2
-rw-r--r-- | drivers/char/tpm/tpm_atmel.c         |  12
-rw-r--r-- | drivers/char/tpm/tpm_i2c_infineon.c  |  76
-rw-r--r-- | drivers/char/tpm/tpm_infineon.c      |   8
-rw-r--r-- | drivers/char/tpm/tpm_tis.c           | 175
-rw-r--r-- | drivers/char/tpm/tpm_vtpm_proxy.c    |  69
-rw-r--r-- | drivers/char/tpm/tpmrm-dev.c         |   2
-rw-r--r-- | drivers/infiniband/core/Makefile     |   3
-rw-r--r-- | drivers/infiniband/core/cache.c      |  43
-rw-r--r-- | drivers/infiniband/core/core_priv.h  | 115
-rw-r--r-- | drivers/infiniband/core/device.c     |  86
-rw-r--r-- | drivers/infiniband/core/mad.c        |  52
-rw-r--r-- | drivers/infiniband/core/security.c   | 705
-rw-r--r-- | drivers/infiniband/core/uverbs_cmd.c |  15
-rw-r--r-- | drivers/infiniband/core/verbs.c      |  27
20 files changed, 1294 insertions, 248 deletions
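Editor's note: most of the tpm-interface.c churn below replaces open-coded struct tpm_cmd_t construction with the tpm_buf helpers. A minimal sketch of the resulting call pattern, assuming only what the converted tpm_getcap() and tpm1_pcr_extend() hunks show (the wrapper function and its name are hypothetical, not part of the patch):

/* Hypothetical helper, for illustration only: send a TPM 1.2 command
 * that takes a single 32-bit argument, using the tpm_buf API that the
 * hunks below convert to.
 */
static int example_tpm1_cmd_u32(struct tpm_chip *chip, u32 ordinal,
				u32 arg, const char *desc)
{
	struct tpm_buf buf;
	int rc;

	/* Writes the 10-byte TPM 1.2 header (tag, length, ordinal);
	 * the length field is kept up to date as data is appended,
	 * which is what lets the patch pass host-endian tag/ordinal
	 * values and drop the fixed-size param structs from tpm.h. */
	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, ordinal);
	if (rc)
		return rc;

	/* Appended values are converted to big-endian wire format. */
	tpm_buf_append_u32(&buf, arg);

	/* PAGE_SIZE bounds the response; the 0 min-body-size and
	 * 0 flags mirror the simplest calls in the hunks below. */
	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, desc);

	tpm_buf_destroy(&buf);
	return rc;
}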
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 1b10e38f214e..be5d1abd3e8e 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -127,7 +127,7 @@ static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client) struct device *dev = &client->dev; int ret; - ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), acpi_st33zp24_gpios); + ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios); if (ret) return ret; @@ -285,7 +285,6 @@ static int st33zp24_i2c_remove(struct i2c_client *client) if (ret) return ret; - acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev)); return 0; } diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index c69d15198f84..0fc4f20b5f83 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -246,7 +246,7 @@ static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev) struct device *dev = &spi_dev->dev; int ret; - ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), acpi_st33zp24_gpios); + ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios); if (ret) return ret; @@ -402,7 +402,6 @@ static int st33zp24_spi_remove(struct spi_device *dev) if (ret) return ret; - acpi_dev_remove_driver_gpios(ACPI_COMPANION(&dev->dev)); return 0; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 158c1db83f05..d2b4df6d9894 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -416,7 +416,8 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, /* Store the decision as chip->locality will be changed. */ need_locality = chip->locality == -1; - if (need_locality && chip->ops->request_locality) { + if (!(flags & TPM_TRANSMIT_RAW) && + need_locality && chip->ops->request_locality) { rc = chip->ops->request_locality(chip, 0); if (rc < 0) goto out_no_locality; @@ -429,8 +430,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, rc = chip->ops->send(chip, (u8 *) buf, count); if (rc < 0) { - dev_err(&chip->dev, - "tpm_transmit: tpm_send: error %d\n", rc); + if (rc != -EPIPE) + dev_err(&chip->dev, + "%s: tpm_send: error %d\n", __func__, rc); goto out; } @@ -536,59 +538,62 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, return 0; } +EXPORT_SYMBOL_GPL(tpm_transmit_cmd); #define TPM_DIGEST_SIZE 20 #define TPM_RET_CODE_IDX 6 #define TPM_INTERNAL_RESULT_SIZE 200 -#define TPM_ORD_GET_CAP cpu_to_be32(101) -#define TPM_ORD_GET_RANDOM cpu_to_be32(70) +#define TPM_ORD_GET_CAP 101 +#define TPM_ORD_GET_RANDOM 70 static const struct tpm_input_header tpm_getcap_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(22), - .ordinal = TPM_ORD_GET_CAP + .ordinal = cpu_to_be32(TPM_ORD_GET_CAP) }; ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, const char *desc, size_t min_cap_length) { - struct tpm_cmd_t tpm_cmd; + struct tpm_buf buf; int rc; - tpm_cmd.header.in = tpm_getcap_header; + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP); + if (rc) + return rc; + if (subcap_id == TPM_CAP_VERSION_1_1 || subcap_id == TPM_CAP_VERSION_1_2) { - tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id); - /*subcap field not necessary */ - tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0); - tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32)); + tpm_buf_append_u32(&buf, subcap_id); + tpm_buf_append_u32(&buf, 0); } else { if (subcap_id == 
TPM_CAP_FLAG_PERM || subcap_id == TPM_CAP_FLAG_VOL) - tpm_cmd.params.getcap_in.cap = - cpu_to_be32(TPM_CAP_FLAG); + tpm_buf_append_u32(&buf, TPM_CAP_FLAG); else - tpm_cmd.params.getcap_in.cap = - cpu_to_be32(TPM_CAP_PROP); - tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4); - tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id); + tpm_buf_append_u32(&buf, TPM_CAP_PROP); + + tpm_buf_append_u32(&buf, 4); + tpm_buf_append_u32(&buf, subcap_id); } - rc = tpm_transmit_cmd(chip, NULL, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, + rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, min_cap_length, 0, desc); if (!rc) - *cap = tpm_cmd.params.getcap_out.cap; + *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4]; + + tpm_buf_destroy(&buf); return rc; } EXPORT_SYMBOL_GPL(tpm_getcap); -#define TPM_ORD_STARTUP cpu_to_be32(153) +#define TPM_ORD_STARTUP 153 #define TPM_ST_CLEAR cpu_to_be16(1) #define TPM_ST_STATE cpu_to_be16(2) #define TPM_ST_DEACTIVATED cpu_to_be16(3) static const struct tpm_input_header tpm_startup_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(12), - .ordinal = TPM_ORD_STARTUP + .ordinal = cpu_to_be32(TPM_ORD_STARTUP) }; static int tpm_startup(struct tpm_chip *chip, __be16 startup_type) @@ -737,7 +742,7 @@ EXPORT_SYMBOL_GPL(tpm_get_timeouts); #define CONTINUE_SELFTEST_RESULT_SIZE 10 static const struct tpm_input_header continue_selftest_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(10), .ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST), }; @@ -760,13 +765,13 @@ static int tpm_continue_selftest(struct tpm_chip *chip) return rc; } -#define TPM_ORDINAL_PCRREAD cpu_to_be32(21) +#define TPM_ORDINAL_PCRREAD 21 #define READ_PCR_RESULT_SIZE 30 #define READ_PCR_RESULT_BODY_SIZE 20 static const struct tpm_input_header pcrread_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(14), - .ordinal = TPM_ORDINAL_PCRREAD + .ordinal = cpu_to_be32(TPM_ORDINAL_PCRREAD) }; int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) @@ -838,15 +843,34 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) } EXPORT_SYMBOL_GPL(tpm_pcr_read); -#define TPM_ORD_PCR_EXTEND cpu_to_be32(20) +#define TPM_ORD_PCR_EXTEND 20 #define EXTEND_PCR_RESULT_SIZE 34 #define EXTEND_PCR_RESULT_BODY_SIZE 20 static const struct tpm_input_header pcrextend_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(34), - .ordinal = TPM_ORD_PCR_EXTEND + .ordinal = cpu_to_be32(TPM_ORD_PCR_EXTEND) }; +static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash, + char *log_msg) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, pcr_idx); + tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE); + + rc = tpm_transmit_cmd(chip, NULL, buf.data, EXTEND_PCR_RESULT_SIZE, + EXTEND_PCR_RESULT_BODY_SIZE, 0, log_msg); + tpm_buf_destroy(&buf); + return rc; +} + /** * tpm_pcr_extend - extend pcr value with hash * @chip_num: tpm idx # or AN& @@ -859,7 +883,6 @@ static const struct tpm_input_header pcrextend_header = { */ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { - struct tpm_cmd_t cmd; int rc; struct tpm_chip *chip; struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)]; @@ -885,13 +908,8 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) return rc; } - 
cmd.header.in = pcrextend_header; - cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx); - memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, NULL, &cmd, EXTEND_PCR_RESULT_SIZE, - EXTEND_PCR_RESULT_BODY_SIZE, 0, - "attempting extend a PCR value"); - + rc = tpm1_pcr_extend(chip, pcr_idx, hash, + "attempting extend a PCR value"); tpm_put_ops(chip); return rc; } @@ -1060,13 +1078,13 @@ again: } EXPORT_SYMBOL_GPL(wait_for_tpm_stat); -#define TPM_ORD_SAVESTATE cpu_to_be32(152) +#define TPM_ORD_SAVESTATE 152 #define SAVESTATE_RESULT_SIZE 10 static const struct tpm_input_header savestate_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(10), - .ordinal = TPM_ORD_SAVESTATE + .ordinal = cpu_to_be32(TPM_ORD_SAVESTATE) }; /* @@ -1090,15 +1108,9 @@ int tpm_pm_suspend(struct device *dev) } /* for buggy tpm, flush pcrs with extend to selected dummy */ - if (tpm_suspend_pcr) { - cmd.header.in = pcrextend_header; - cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr); - memcpy(cmd.params.pcrextend_in.hash, dummy_hash, - TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, NULL, &cmd, EXTEND_PCR_RESULT_SIZE, - EXTEND_PCR_RESULT_BODY_SIZE, 0, - "extending dummy pcr before suspend"); - } + if (tpm_suspend_pcr) + rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash, + "extending dummy pcr before suspend"); /* now do the actual savestate */ for (try = 0; try < TPM_RETRY; try++) { @@ -1149,9 +1161,9 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume); #define TPM_GETRANDOM_RESULT_SIZE 18 static const struct tpm_input_header tpm_getrandom_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(14), - .ordinal = TPM_ORD_GET_RANDOM + .ordinal = cpu_to_be32(TPM_ORD_GET_RANDOM) }; /** diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 55405dbe43fa..4bd0997cfa2d 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -22,11 +22,11 @@ #define READ_PUBEK_RESULT_SIZE 314 #define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256) -#define TPM_ORD_READPUBEK cpu_to_be32(124) +#define TPM_ORD_READPUBEK 124 static const struct tpm_input_header tpm_readpubek_header = { - .tag = TPM_TAG_RQU_COMMAND, + .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), .length = cpu_to_be32(30), - .ordinal = TPM_ORD_READPUBEK + .ordinal = cpu_to_be32(TPM_ORD_READPUBEK) }; static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 4b4c8dee3096..1df0521138d3 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -247,7 +247,7 @@ struct tpm_output_header { __be32 return_code; } __packed; -#define TPM_TAG_RQU_COMMAND cpu_to_be16(193) +#define TPM_TAG_RQU_COMMAND 193 struct stclear_flags_t { __be16 tag; @@ -339,17 +339,6 @@ enum tpm_sub_capabilities { TPM_CAP_PROP_TIS_DURATION = 0x120, }; -struct tpm_getcap_params_in { - __be32 cap; - __be32 subcap_size; - __be32 subcap; -} __packed; - -struct tpm_getcap_params_out { - __be32 cap_size; - cap_t cap; -} __packed; - struct tpm_readpubek_params_out { u8 algorithm[4]; u8 encscheme[2]; @@ -374,11 +363,6 @@ struct tpm_pcrread_in { __be32 pcr_idx; } __packed; -struct tpm_pcrextend_in { - __be32 pcr_idx; - u8 hash[TPM_DIGEST_SIZE]; -} __packed; - /* 128 bytes is an arbitrary cap. 
This could be as large as TPM_BUFSIZE - 18 * bytes, but 128 is still a relatively large number of random bytes and * anything much bigger causes users of struct tpm_cmd_t to start getting @@ -399,13 +383,10 @@ struct tpm_startup_in { } __packed; typedef union { - struct tpm_getcap_params_out getcap_out; struct tpm_readpubek_params_out readpubek_out; u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)]; - struct tpm_getcap_params_in getcap_in; struct tpm_pcrread_in pcrread_in; struct tpm_pcrread_out pcrread_out; - struct tpm_pcrextend_in pcrextend_in; struct tpm_getrandom_in getrandom_in; struct tpm_getrandom_out getrandom_out; struct tpm_startup_in startup_in; @@ -525,6 +506,7 @@ extern struct idr dev_nums_idr; enum tpm_transmit_flags { TPM_TRANSMIT_UNLOCKED = BIT(0), + TPM_TRANSMIT_RAW = BIT(1), }; ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 3ee6883f26c1..3a9964326279 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -840,7 +840,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) /* In places where shutdown command is sent there's no much we can do * except print the error code on a system failure. */ - if (rc < 0) + if (rc < 0 && rc != -EPIPE) dev_warn(&chip->dev, "transmit returned %d while stopping the TPM", rc); } diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 0d322ab11faa..66a14526aaf4 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -144,13 +144,11 @@ static void atml_plat_remove(void) struct tpm_chip *chip = dev_get_drvdata(&pdev->dev); struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev); - if (chip) { - tpm_chip_unregister(chip); - if (priv->have_region) - atmel_release_region(priv->base, priv->region_size); - atmel_put_base_addr(priv->iobase); - platform_device_unregister(pdev); - } + tpm_chip_unregister(chip); + if (priv->have_region) + atmel_release_region(priv->base, priv->region_size); + atmel_put_base_addr(priv->iobase); + platform_device_unregister(pdev); } static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index dc47fa222a26..79d6bbb58e39 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -70,6 +70,7 @@ struct tpm_inf_dev { u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */ struct tpm_chip *chip; enum i2c_chip_type chip_type; + unsigned int adapterlimit; }; static struct tpm_inf_dev tpm_dev; @@ -111,6 +112,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len) int rc = 0; int count; + unsigned int msglen = len; /* Lock the adapter for the duration of the whole sequence. 
*/ if (!tpm_dev.client->adapter->algo->master_xfer) @@ -131,27 +133,61 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len) usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); } } else { - /* slb9635 protocol should work in all cases */ - for (count = 0; count < MAX_COUNT; count++) { - rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1); - if (rc > 0) - break; /* break here to skip sleep */ - - usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); - } - - if (rc <= 0) - goto out; - - /* After the TPM has successfully received the register address - * it needs some time, thus we're sleeping here again, before - * retrieving the data + /* Expect to send one command message and one data message, but + * support looping over each or both if necessary. */ - for (count = 0; count < MAX_COUNT; count++) { - usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); - rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1); - if (rc > 0) - break; + while (len > 0) { + /* slb9635 protocol should work in all cases */ + for (count = 0; count < MAX_COUNT; count++) { + rc = __i2c_transfer(tpm_dev.client->adapter, + &msg1, 1); + if (rc > 0) + break; /* break here to skip sleep */ + + usleep_range(SLEEP_DURATION_LOW, + SLEEP_DURATION_HI); + } + + if (rc <= 0) + goto out; + + /* After the TPM has successfully received the register + * address it needs some time, thus we're sleeping here + * again, before retrieving the data + */ + for (count = 0; count < MAX_COUNT; count++) { + if (tpm_dev.adapterlimit) { + msglen = min_t(unsigned int, + tpm_dev.adapterlimit, + len); + msg2.len = msglen; + } + usleep_range(SLEEP_DURATION_LOW, + SLEEP_DURATION_HI); + rc = __i2c_transfer(tpm_dev.client->adapter, + &msg2, 1); + if (rc > 0) { + /* Since len is unsigned, make doubly + * sure we do not underflow it. + */ + if (msglen > len) + len = 0; + else + len -= msglen; + msg2.buf += msglen; + break; + } + /* If the I2C adapter rejected the request (e.g + * when the quirk read_max_len < len) fall back + * to a sane minimum value and try again. 
+ */ + if (rc == -EOPNOTSUPP) + tpm_dev.adapterlimit = + I2C_SMBUS_BLOCK_MAX; + } + + if (rc <= 0) + goto out; } } diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index e3cf9f3545c5..3b1b9f9322d5 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -397,7 +397,7 @@ static int tpm_inf_pnp_probe(struct pnp_dev *dev, int vendorid[2]; int version[2]; int productid[2]; - char chipname[20]; + const char *chipname; struct tpm_chip *chip; /* read IO-ports through PnP */ @@ -488,13 +488,13 @@ static int tpm_inf_pnp_probe(struct pnp_dev *dev, switch ((productid[0] << 8) | productid[1]) { case 6: - snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)"); + chipname = " (SLD 9630 TT 1.1)"; break; case 11: - snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)"); + chipname = " (SLB 9635 TT 1.2)"; break; default: - snprintf(chipname, sizeof(chipname), " (unknown chip)"); + chipname = " (unknown chip)"; break; } diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index c7e1384f1b08..b14d4aa97af8 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -80,6 +80,8 @@ static int has_hid(struct acpi_device *dev, const char *hid) static inline int is_itpm(struct acpi_device *dev) { + if (!dev) + return 0; return has_hid(dev, "INTC0102"); } #else @@ -89,6 +91,47 @@ static inline int is_itpm(struct acpi_device *dev) } #endif +#if defined(CONFIG_ACPI) +#define DEVICE_IS_TPM2 1 + +static const struct acpi_device_id tpm_acpi_tbl[] = { + {"MSFT0101", DEVICE_IS_TPM2}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl); + +static int check_acpi_tpm2(struct device *dev) +{ + const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev); + struct acpi_table_tpm2 *tbl; + acpi_status st; + + if (!aid || aid->driver_data != DEVICE_IS_TPM2) + return 0; + + /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2 + * table is mandatory + */ + st = + acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); + if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { + dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); + return -EINVAL; + } + + /* The tpm2_crb driver handles this device */ + if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) + return -ENODEV; + + return 0; +} +#else +static int check_acpi_tpm2(struct device *dev) +{ + return 0; +} +#endif + static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { @@ -141,11 +184,15 @@ static const struct tpm_tis_phy_ops tpm_tcg = { .write32 = tpm_tcg_write32, }; -static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, - acpi_handle acpi_dev_handle) +static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info) { struct tpm_tis_tcg_phy *phy; int irq = -1; + int rc; + + rc = check_acpi_tpm2(dev); + if (rc) + return rc; phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL); if (phy == NULL) @@ -158,11 +205,11 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, if (interrupts) irq = tpm_info->irq; - if (itpm) + if (itpm || is_itpm(ACPI_COMPANION(dev))) phy->priv.flags |= TPM_TIS_ITPM_WORKAROUND; return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg, - acpi_dev_handle); + ACPI_HANDLE(dev)); } static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); @@ -171,7 +218,6 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, const struct pnp_device_id *pnp_id) { struct tpm_info tpm_info = {}; - acpi_handle acpi_dev_handle = NULL; 
struct resource *res; res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0); @@ -184,14 +230,7 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev, else tpm_info.irq = -1; - if (pnp_acpi_device(pnp_dev)) { - if (is_itpm(pnp_acpi_device(pnp_dev))) - itpm = true; - - acpi_dev_handle = ACPI_HANDLE(&pnp_dev->dev); - } - - return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle); + return tpm_tis_init(&pnp_dev->dev, &tpm_info); } static struct pnp_device_id tpm_pnp_tbl[] = { @@ -231,93 +270,6 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); -#ifdef CONFIG_ACPI -static int tpm_check_resource(struct acpi_resource *ares, void *data) -{ - struct tpm_info *tpm_info = (struct tpm_info *) data; - struct resource res; - - if (acpi_dev_resource_interrupt(ares, 0, &res)) - tpm_info->irq = res.start; - else if (acpi_dev_resource_memory(ares, &res)) { - tpm_info->res = res; - tpm_info->res.name = NULL; - } - - return 1; -} - -static int tpm_tis_acpi_init(struct acpi_device *acpi_dev) -{ - struct acpi_table_tpm2 *tbl; - acpi_status st; - struct list_head resources; - struct tpm_info tpm_info = {}; - int ret; - - st = acpi_get_table(ACPI_SIG_TPM2, 1, - (struct acpi_table_header **) &tbl); - if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { - dev_err(&acpi_dev->dev, - FW_BUG "failed to get TPM2 ACPI table\n"); - return -EINVAL; - } - - if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; - - INIT_LIST_HEAD(&resources); - tpm_info.irq = -1; - ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource, - &tpm_info); - if (ret < 0) - return ret; - - acpi_dev_free_resource_list(&resources); - - if (resource_type(&tpm_info.res) != IORESOURCE_MEM) { - dev_err(&acpi_dev->dev, - FW_BUG "TPM2 ACPI table does not define a memory resource\n"); - return -EINVAL; - } - - if (is_itpm(acpi_dev)) - itpm = true; - - return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle); -} - -static int tpm_tis_acpi_remove(struct acpi_device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(&dev->dev); - - tpm_chip_unregister(chip); - tpm_tis_remove(chip); - - return 0; -} - -static struct acpi_device_id tpm_acpi_tbl[] = { - {"MSFT0101", 0}, /* TPM 2.0 */ - /* Add new here */ - {"", 0}, /* User Specified */ - {"", 0} /* Terminator */ -}; -MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl); - -static struct acpi_driver tis_acpi_driver = { - .name = "tpm_tis", - .ids = tpm_acpi_tbl, - .ops = { - .add = tpm_tis_acpi_init, - .remove = tpm_tis_acpi_remove, - }, - .drv = { - .pm = &tpm_tis_pm, - }, -}; -#endif - static struct platform_device *force_pdev; static int tpm_tis_plat_probe(struct platform_device *pdev) @@ -332,18 +284,16 @@ static int tpm_tis_plat_probe(struct platform_device *pdev) } tpm_info.res = *res; - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res) { - tpm_info.irq = res->start; - } else { - if (pdev == force_pdev) + tpm_info.irq = platform_get_irq(pdev, 0); + if (tpm_info.irq <= 0) { + if (pdev != force_pdev) tpm_info.irq = -1; else /* When forcing auto probe the IRQ */ tpm_info.irq = 0; } - return tpm_tis_init(&pdev->dev, &tpm_info, NULL); + return tpm_tis_init(&pdev->dev, &tpm_info); } static int tpm_tis_plat_remove(struct platform_device *pdev) @@ -371,6 +321,7 @@ static struct platform_driver tis_drv = { .name = "tpm_tis", .pm = &tpm_tis_pm, .of_match_table = of_match_ptr(tis_of_platform_match), + .acpi_match_table = 
ACPI_PTR(tpm_acpi_tbl), }, }; @@ -413,11 +364,6 @@ static int __init init_tis(void) if (rc) goto err_platform; -#ifdef CONFIG_ACPI - rc = acpi_bus_register_driver(&tis_acpi_driver); - if (rc) - goto err_acpi; -#endif if (IS_ENABLED(CONFIG_PNP)) { rc = pnp_register_driver(&tis_pnp_driver); @@ -428,10 +374,6 @@ static int __init init_tis(void) return 0; err_pnp: -#ifdef CONFIG_ACPI - acpi_bus_unregister_driver(&tis_acpi_driver); -err_acpi: -#endif platform_driver_unregister(&tis_drv); err_platform: if (force_pdev) @@ -443,9 +385,6 @@ err_force: static void __exit cleanup_tis(void) { pnp_unregister_driver(&tis_pnp_driver); -#ifdef CONFIG_ACPI - acpi_bus_unregister_driver(&tis_acpi_driver); -#endif platform_driver_unregister(&tis_drv); if (force_pdev) diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 751059d2140a..1d877cc9af97 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -43,6 +43,7 @@ struct proxy_dev { #define STATE_OPENED_FLAG BIT(0) #define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */ #define STATE_REGISTERED_FLAG BIT(2) +#define STATE_DRIVER_COMMAND BIT(3) /* sending a driver specific command */ size_t req_len; /* length of queued TPM request */ size_t resp_len; /* length of queued TPM response */ @@ -299,6 +300,28 @@ out: return len; } +static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, + u8 *buf, size_t count) +{ + struct tpm_input_header *hdr = (struct tpm_input_header *)buf; + + if (count < sizeof(struct tpm_input_header)) + return 0; + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + switch (be32_to_cpu(hdr->ordinal)) { + case TPM2_CC_SET_LOCALITY: + return 1; + } + } else { + switch (be32_to_cpu(hdr->ordinal)) { + case TPM_ORD_SET_LOCALITY: + return 1; + } + } + return 0; +} + /* * Called when core TPM driver forwards TPM requests to 'server side'. 
* @@ -321,6 +344,10 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) return -EIO; } + if (!(proxy_dev->state & STATE_DRIVER_COMMAND) && + vtpm_proxy_is_driver_command(chip, buf, count)) + return -EFAULT; + mutex_lock(&proxy_dev->buf_lock); if (!(proxy_dev->state & STATE_OPENED_FLAG)) { @@ -371,6 +398,47 @@ static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status) return ret; } +static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) +{ + struct tpm_buf buf; + int rc; + const struct tpm_output_header *header; + struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, + TPM2_CC_SET_LOCALITY); + else + rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, + TPM_ORD_SET_LOCALITY); + if (rc) + return rc; + tpm_buf_append_u8(&buf, locality); + + proxy_dev->state |= STATE_DRIVER_COMMAND; + + rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0, + TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, + "attempting to set locality"); + + proxy_dev->state &= ~STATE_DRIVER_COMMAND; + + if (rc < 0) { + locality = rc; + goto out; + } + + header = (const struct tpm_output_header *)buf.data; + rc = be32_to_cpu(header->return_code); + if (rc) + locality = -1; + +out: + tpm_buf_destroy(&buf); + + return locality; +} + static const struct tpm_class_ops vtpm_proxy_tpm_ops = { .flags = TPM_OPS_AUTO_STARTUP, .recv = vtpm_proxy_tpm_op_recv, @@ -380,6 +448,7 @@ static const struct tpm_class_ops vtpm_proxy_tpm_ops = { .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG, .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG, .req_canceled = vtpm_proxy_tpm_req_canceled, + .request_locality = vtpm_proxy_request_locality, }; /* diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index c636e7fdd1f5..1a0e97a5da5a 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -45,7 +45,7 @@ static int tpmrm_release(struct inode *inode, struct file *file) return 0; } -ssize_t tpmrm_write(struct file *file, const char __user *buf, +static ssize_t tpmrm_write(struct file *file, const char __user *buf, size_t size, loff_t *off) { struct file_priv *fpriv = file->private_data; diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 6ebd9ad95010..e3cdafff8ece 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -10,7 +10,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ - multicast.o mad.o smi.o agent.o mad_rmpp.o + multicast.o mad.o smi.o agent.o mad_rmpp.o \ + security.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b1371eb9f46c..efc94304dee3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -53,6 +53,7 @@ struct ib_update_work { struct work_struct work; struct ib_device *device; u8 port_num; + bool enforce_security; }; union ib_gid zgid; @@ -911,6 +912,26 @@ int ib_get_cached_pkey(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_pkey); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx) +{ + unsigned long flags; + int p; + + if (port_num < 
rdma_start_port(device) || + port_num > rdma_end_port(device)) + return -EINVAL; + + p = port_num - rdma_start_port(device); + read_lock_irqsave(&device->cache.lock, flags); + *sn_pfx = device->cache.ports[p].subnet_prefix; + read_unlock_irqrestore(&device->cache.lock, flags); + + return 0; +} +EXPORT_SYMBOL(ib_get_cached_subnet_prefix); + int ib_find_cached_pkey(struct ib_device *device, u8 port_num, u16 pkey, @@ -1022,7 +1043,8 @@ int ib_get_cached_port_state(struct ib_device *device, EXPORT_SYMBOL(ib_get_cached_port_state); static void ib_cache_update(struct ib_device *device, - u8 port) + u8 port, + bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1108,8 +1130,15 @@ static void ib_cache_update(struct ib_device *device, device->cache.ports[port - rdma_start_port(device)].port_state = tprops->state; + device->cache.ports[port - rdma_start_port(device)].subnet_prefix = + tprops->subnet_prefix; write_unlock_irq(&device->cache.lock); + if (enforce_security) + ib_security_cache_change(device, + port, + tprops->subnet_prefix); + kfree(gid_cache); kfree(old_pkey_cache); kfree(tprops); @@ -1126,7 +1155,9 @@ static void ib_cache_task(struct work_struct *_work) struct ib_update_work *work = container_of(_work, struct ib_update_work, work); - ib_cache_update(work->device, work->port_num); + ib_cache_update(work->device, + work->port_num, + work->enforce_security); kfree(work); } @@ -1147,6 +1178,12 @@ static void ib_cache_event(struct ib_event_handler *handler, INIT_WORK(&work->work, ib_cache_task); work->device = event->device; work->port_num = event->element.port_num; + if (event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_GID_CHANGE) + work->enforce_security = true; + else + work->enforce_security = false; + queue_work(ib_wq, &work->work); } } @@ -1172,7 +1209,7 @@ int ib_cache_setup_one(struct ib_device *device) goto out; for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) - ib_cache_update(device, p + rdma_start_port(device)); + ib_cache_update(device, p + rdma_start_port(device), true); INIT_IB_EVENT_HANDLER(&device->cache.event_handler, device, ib_cache_event); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index d92ab4eaa8f3..11ae67514e13 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -38,6 +38,16 @@ #include <linux/cgroup_rdma.h> #include <rdma/ib_verbs.h> +#include <rdma/ib_mad.h> +#include "mad_priv.h" + +struct pkey_index_qp_list { + struct list_head pkey_index_list; + u16 pkey_index; + /* Lock to hold while iterating the qp_list. 
*/ + spinlock_t qp_list_lock; + struct list_head qp_list; +}; #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) int cma_configfs_init(void); @@ -186,4 +196,109 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct netlink_callback *cb); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx); + +#ifdef CONFIG_SECURITY_INFINIBAND +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec); + +void ib_security_destroy_port_pkey_list(struct ib_device *device); + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix); + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata); + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_destroy_qp_security_begin(struct ib_qp_security *sec); +void ib_destroy_qp_security_abort(struct ib_qp_security *sec); +void ib_destroy_qp_security_end(struct ib_qp_security *sec); +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_close_shared_qp_security(struct ib_qp_security *sec); +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type); +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent); +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index); +#else +static inline int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + return 0; +} + +static inline void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ +} + +static inline void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ +} + +static inline int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + return qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); +} + +static inline int ib_create_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ +} + +static inline int ib_open_shared_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ +} + +static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + return 0; +} + +static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ +} + +static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, + u16 pkey_index) +{ + return 0; +} +#endif #endif /* _CORE_PRIV_H */ diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 81d447da0048..631eaa9daf65 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -39,6 +39,8 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/security.h> +#include <linux/notifier.h> #include <rdma/rdma_netlink.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -82,6 +84,14 @@ static LIST_HEAD(client_list); static DEFINE_MUTEX(device_mutex); static DECLARE_RWSEM(lists_rwsem); +static int ib_security_change(struct notifier_block *nb, unsigned long event, + 
void *lsm_data); +static void ib_policy_change_task(struct work_struct *work); +static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); + +static struct notifier_block ibdev_lsm_nb = { + .notifier_call = ib_security_change, +}; static int ib_device_check_mandatory(struct ib_device *device) { @@ -325,6 +335,64 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len) } EXPORT_SYMBOL(ib_get_device_fw_str); +static int setup_port_pkey_list(struct ib_device *device) +{ + int i; + + /** + * device->port_pkey_list is indexed directly by the port number, + * Therefore it is declared as a 1 based array with potential empty + * slots at the beginning. + */ + device->port_pkey_list = kcalloc(rdma_end_port(device) + 1, + sizeof(*device->port_pkey_list), + GFP_KERNEL); + + if (!device->port_pkey_list) + return -ENOMEM; + + for (i = 0; i < (rdma_end_port(device) + 1); i++) { + spin_lock_init(&device->port_pkey_list[i].list_lock); + INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list); + } + + return 0; +} + +static void ib_policy_change_task(struct work_struct *work) +{ + struct ib_device *dev; + + down_read(&lists_rwsem); + list_for_each_entry(dev, &device_list, core_list) { + int i; + + for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) { + u64 sp; + int ret = ib_get_cached_subnet_prefix(dev, + i, + &sp); + + WARN_ONCE(ret, + "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", + ret); + ib_security_cache_change(dev, i, sp); + } + } + up_read(&lists_rwsem); +} + +static int ib_security_change(struct notifier_block *nb, unsigned long event, + void *lsm_data) +{ + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + schedule_work(&ib_policy_change_work); + + return NOTIFY_OK; +} + /** * ib_register_device - Register an IB device with IB core * @device:Device to register @@ -385,6 +453,12 @@ int ib_register_device(struct ib_device *device, goto out; } + ret = setup_port_pkey_list(device); + if (ret) { + pr_warn("Couldn't create per port_pkey_list\n"); + goto out; + } + ret = ib_cache_setup_one(device); if (ret) { pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n"); @@ -468,6 +542,9 @@ void ib_unregister_device(struct ib_device *device) ib_device_unregister_sysfs(device); ib_cache_cleanup_one(device); + ib_security_destroy_port_pkey_list(device); + kfree(device->port_pkey_list); + down_write(&lists_rwsem); spin_lock_irqsave(&device->client_data_lock, flags); list_for_each_entry_safe(context, tmp, &device->client_data_list, list) @@ -1082,10 +1159,18 @@ static int __init ib_core_init(void) goto err_sa; } + ret = register_lsm_notifier(&ibdev_lsm_nb); + if (ret) { + pr_warn("Couldn't register LSM notifier. 
ret %d\n", ret); + goto err_ibnl_clients; + } + ib_cache_setup(); return 0; +err_ibnl_clients: + ib_remove_ibnl_clients(); err_sa: ib_sa_cleanup(); err_mad: @@ -1105,6 +1190,7 @@ err: static void __exit ib_core_cleanup(void) { + unregister_lsm_notifier(&ibdev_lsm_nb); ib_cache_cleanup(); ib_remove_ibnl_clients(); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 192ee3dafb80..f8f53bb90837 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -40,9 +40,11 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/security.h> #include <rdma/ib_cache.h> #include "mad_priv.h" +#include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" @@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, atomic_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); + ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); + if (ret2) { + ret = ERR_PTR(ret2); + goto error4; + } + spin_lock_irqsave(&port_priv->reg_lock, flags); mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; @@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (method) { if (method_in_use(&method, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, @@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (is_vendor_method_in_use( vendor_class, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); - goto error4; + goto error5; } } @@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, spin_unlock_irqrestore(&port_priv->reg_lock, flags); return &mad_agent_priv->agent; - -error4: +error5: spin_unlock_irqrestore(&port_priv->reg_lock, flags); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); +error4: kfree(reg_req); error3: kfree(mad_agent_priv); @@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, struct ib_mad_agent *ret; struct ib_mad_snoop_private *mad_snoop_priv; int qpn; + int err; /* Validate parameters */ if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || @@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, mad_snoop_priv->agent.port_num = port_num; mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; init_completion(&mad_snoop_priv->comp); + + err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); + if (err) { + ret = ERR_PTR(err); + goto error2; + } + mad_snoop_priv->snoop_index = register_snoop_agent( &port_priv->qp_info[qpn], mad_snoop_priv); if (mad_snoop_priv->snoop_index < 0) { ret = ERR_PTR(mad_snoop_priv->snoop_index); - goto error2; + goto error3; } atomic_set(&mad_snoop_priv->refcount, 1); return &mad_snoop_priv->agent; - +error3: + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); error2: kfree(mad_snoop_priv); error1: @@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); + kfree(mad_agent_priv->reg_req); kfree(mad_agent_priv); } @@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) deref_snoop_agent(mad_snoop_priv); 
wait_for_completion(&mad_snoop_priv->comp); + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); + kfree(mad_snoop_priv); } @@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, /* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { - mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; + ret = ib_mad_enforce_security(mad_agent_priv, + mad_send_wr->send_wr.pkey_index); + if (ret) + goto error; + if (!send_buf->mad_agent->send_handler || (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) { @@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; + int ret; + + ret = ib_mad_enforce_security(mad_agent_priv, + mad_recv_wc->wc->pkey_index); + if (ret) { + ib_free_recv_mad(mad_recv_wc); + deref_mad_agent(mad_agent_priv); + } INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); @@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, mad_recv_wc); deref_mad_agent(mad_agent_priv); } + + return; } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c new file mode 100644 index 000000000000..3e8c38953912 --- /dev/null +++ b/drivers/infiniband/core/security.c @@ -0,0 +1,705 @@ +/* + * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifdef CONFIG_SECURITY_INFINIBAND + +#include <linux/security.h> +#include <linux/completion.h> +#include <linux/list.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> +#include "core_priv.h" +#include "mad_priv.h" + +static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey = NULL; + struct pkey_index_qp_list *tmp_pkey; + struct ib_device *dev = pp->sec->dev; + + spin_lock(&dev->port_pkey_list[pp->port_num].list_lock); + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[pp->port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + pkey = tmp_pkey; + break; + } + } + spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock); + return pkey; +} + +static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, + u16 *pkey, + u64 *subnet_prefix) +{ + struct ib_device *dev = pp->sec->dev; + int ret; + + ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix); + + return ret; +} + +static int enforce_qp_pkey_security(u16 pkey, + u64 subnet_prefix, + struct ib_qp_security *qp_sec) +{ + struct ib_qp_security *shared_qp_sec; + int ret; + + ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey); + if (ret) + return ret; + + if (qp_sec->qp == qp_sec->qp->real_qp) { + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; + } + } + return 0; +} + +/* The caller of this function must hold the QP security + * mutex of the QP of the security structure in *pps. + * + * It takes separate ports_pkeys and security structure + * because in some cases the pps will be for a new settings + * or the pps will be for the real QP and security structure + * will be for a shared QP. + */ +static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps, + struct ib_qp_security *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret = 0; + + if (!pps) + return 0; + + if (pps->main.state != IB_PORT_PKEY_NOT_VALID) { + get_pkey_and_subnet_prefix(&pps->main, + &pkey, + &subnet_prefix); + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + if (ret) + return ret; + + if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) { + get_pkey_and_subnet_prefix(&pps->alt, + &pkey, + &subnet_prefix); + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void qp_to_error(struct ib_qp_security *sec) +{ + struct ib_qp_security *shared_qp_sec; + struct ib_qp_attr attr = { + .qp_state = IB_QPS_ERR + }; + struct ib_event event = { + .event = IB_EVENT_QP_FATAL + }; + + /* If the QP is in the process of being destroyed + * the qp pointer in the security structure is + * undefined. It cannot be modified now. 
+ */ + if (sec->destroying) + return; + + ib_modify_qp(sec->qp, + &attr, + IB_QP_STATE); + + if (sec->qp->event_handler && sec->qp->qp_context) { + event.element.qp = sec->qp; + sec->qp->event_handler(&event, + sec->qp->qp_context); + } + + list_for_each_entry(shared_qp_sec, + &sec->shared_qp_list, + shared_qp_list) { + struct ib_qp *qp = shared_qp_sec->qp; + + if (qp->event_handler && qp->qp_context) { + event.element.qp = qp; + event.device = qp->device; + qp->event_handler(&event, + qp->qp_context); + } + } +} + +static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, + struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct ib_port_pkey *pp, *tmp_pp; + bool comp; + LIST_HEAD(to_error_list); + u16 pkey_val; + + if (!ib_get_cached_pkey(device, + port_num, + pkey->pkey_index, + &pkey_val)) { + spin_lock(&pkey->qp_list_lock); + list_for_each_entry(pp, &pkey->qp_list, qp_list) { + if (atomic_read(&pp->sec->error_list_count)) + continue; + + if (enforce_qp_pkey_security(pkey_val, + subnet_prefix, + pp->sec)) { + atomic_inc(&pp->sec->error_list_count); + list_add(&pp->to_error_list, + &to_error_list); + } + } + spin_unlock(&pkey->qp_list_lock); + } + + list_for_each_entry_safe(pp, + tmp_pp, + &to_error_list, + to_error_list) { + mutex_lock(&pp->sec->mutex); + qp_to_error(pp->sec); + list_del(&pp->to_error_list); + atomic_dec(&pp->sec->error_list_count); + comp = pp->sec->destroying; + mutex_unlock(&pp->sec->mutex); + + if (comp) + complete(&pp->sec->error_complete); + } +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static int port_pkey_list_insert(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *tmp_pkey; + struct pkey_index_qp_list *pkey; + struct ib_device *dev; + u8 port_num = pp->port_num; + int ret = 0; + + if (pp->state != IB_PORT_PKEY_VALID) + return 0; + + dev = pp->sec->dev; + + pkey = get_pkey_idx_qp_list(pp); + + if (!pkey) { + bool found = false; + + pkey = kzalloc(sizeof(*pkey), GFP_KERNEL); + if (!pkey) + return -ENOMEM; + + spin_lock(&dev->port_pkey_list[port_num].list_lock); + /* Check for the PKey again. A racing process may + * have created it. + */ + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + kfree(pkey); + pkey = tmp_pkey; + found = true; + break; + } + } + + if (!found) { + pkey->pkey_index = pp->pkey_index; + spin_lock_init(&pkey->qp_list_lock); + INIT_LIST_HEAD(&pkey->qp_list); + list_add(&pkey->pkey_index_list, + &dev->port_pkey_list[port_num].pkey_list); + } + spin_unlock(&dev->port_pkey_list[port_num].list_lock); + } + + spin_lock(&pkey->qp_list_lock); + list_add(&pp->qp_list, &pkey->qp_list); + spin_unlock(&pkey->qp_list_lock); + + pp->state = IB_PORT_PKEY_LISTED; + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void port_pkey_list_remove(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey; + + if (pp->state != IB_PORT_PKEY_LISTED) + return; + + pkey = get_pkey_idx_qp_list(pp); + + spin_lock(&pkey->qp_list_lock); + list_del(&pp->qp_list); + spin_unlock(&pkey->qp_list_lock); + + /* The setting may still be valid, i.e. after + * a destroy has failed for example. + */ + pp->state = IB_PORT_PKEY_VALID; +} + +static void destroy_qp_security(struct ib_qp_security *sec) +{ + security_ib_free_security(sec->security); + kfree(sec->ports_pkeys); + kfree(sec); +} + +/* The caller of this function must hold the QP security + * mutex. 
+ */ +static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, + const struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + struct ib_ports_pkeys *new_pps; + struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys; + + new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL); + if (!new_pps) + return NULL; + + if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { + if (!qp_pps) { + new_pps->main.port_num = qp_attr->port_num; + new_pps->main.pkey_index = qp_attr->pkey_index; + } else { + new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? + qp_attr->port_num : + qp_pps->main.port_num; + + new_pps->main.pkey_index = + (qp_attr_mask & IB_QP_PKEY_INDEX) ? + qp_attr->pkey_index : + qp_pps->main.pkey_index; + } + new_pps->main.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->main.port_num = qp_pps->main.port_num; + new_pps->main.pkey_index = qp_pps->main.pkey_index; + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) + new_pps->main.state = IB_PORT_PKEY_VALID; + } + + if (qp_attr_mask & IB_QP_ALT_PATH) { + new_pps->alt.port_num = qp_attr->alt_port_num; + new_pps->alt.pkey_index = qp_attr->alt_pkey_index; + new_pps->alt.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->alt.port_num = qp_pps->alt.port_num; + new_pps->alt.pkey_index = qp_pps->alt.pkey_index; + if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID) + new_pps->alt.state = IB_PORT_PKEY_VALID; + } + + new_pps->main.sec = qp->qp_sec; + new_pps->alt.sec = qp->qp_sec; + return new_pps; +} + +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + struct ib_qp *real_qp = qp->real_qp; + int ret; + + ret = ib_create_qp_security(qp, dev); + + if (ret) + return ret; + + mutex_lock(&real_qp->qp_sec->mutex); + ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, + qp->qp_sec); + + if (ret) + goto ret; + + if (qp != real_qp) + list_add(&qp->qp_sec->shared_qp_list, + &real_qp->qp_sec->shared_qp_list); +ret: + mutex_unlock(&real_qp->qp_sec->mutex); + if (ret) + destroy_qp_security(qp->qp_sec); + + return ret; +} + +void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ + struct ib_qp *real_qp = sec->qp->real_qp; + + mutex_lock(&real_qp->qp_sec->mutex); + list_del(&sec->shared_qp_list); + mutex_unlock(&real_qp->qp_sec->mutex); + + destroy_qp_security(sec); +} + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + int ret; + + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); + if (!qp->qp_sec) + return -ENOMEM; + + qp->qp_sec->qp = qp; + qp->qp_sec->dev = dev; + mutex_init(&qp->qp_sec->mutex); + INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list); + atomic_set(&qp->qp_sec->error_list_count, 0); + init_completion(&qp->qp_sec->error_complete); + ret = security_ib_alloc_security(&qp->qp_sec->security); + if (ret) + kfree(qp->qp_sec); + + return ret; +} +EXPORT_SYMBOL(ib_create_qp_security); + +void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ + mutex_lock(&sec->mutex); + + /* Remove the QP from the lists so it won't get added to + * a to_error_list during the destroy process. + */ + if (sec->ports_pkeys) { + port_pkey_list_remove(&sec->ports_pkeys->main); + port_pkey_list_remove(&sec->ports_pkeys->alt); + } + + /* If the QP is already in one or more of those lists + * the destroying flag will ensure the to error flow + * doesn't operate on an undefined QP. + */ + sec->destroying = true; + + /* Record the error list count to know how many completions + * to wait for. 
+ */ + sec->error_comps_pending = atomic_read(&sec->error_list_count); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ + int ret; + int i; + + /* If a concurrent cache update is in progress this + * QP security could be marked for an error state + * transition. Wait for this to complete. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + mutex_lock(&sec->mutex); + sec->destroying = false; + + /* Restore the position in the lists and verify + * access is still allowed in case a cache update + * occurred while attempting to destroy. + * + * Because these setting were listed already + * and removed during ib_destroy_qp_security_begin + * we know the pkey_index_qp_list for the PKey + * already exists so port_pkey_list_insert won't fail. + */ + if (sec->ports_pkeys) { + port_pkey_list_insert(&sec->ports_pkeys->main); + port_pkey_list_insert(&sec->ports_pkeys->alt); + } + + ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec); + if (ret) + qp_to_error(sec); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ + int i; + + /* If a concurrent cache update is occurring we must + * wait until this QP security structure is processed + * in the QP to error flow before destroying it because + * the to_error_list is in use. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + destroy_qp_security(sec); +} + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct pkey_index_qp_list *pkey; + + list_for_each_entry(pkey, + &device->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + check_pkey_qps(pkey, + device, + port_num, + subnet_prefix); + } +} + +void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ + struct pkey_index_qp_list *pkey, *tmp_pkey; + int i; + + for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { + spin_lock(&device->port_pkey_list[i].list_lock); + list_for_each_entry_safe(pkey, + tmp_pkey, + &device->port_pkey_list[i].pkey_list, + pkey_index_list) { + list_del(&pkey->pkey_index_list); + kfree(pkey); + } + spin_unlock(&device->port_pkey_list[i].list_lock); + } +} + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + int ret = 0; + struct ib_ports_pkeys *tmp_pps; + struct ib_ports_pkeys *new_pps; + bool special_qp = (qp->qp_type == IB_QPT_SMI || + qp->qp_type == IB_QPT_GSI || + qp->qp_type >= IB_QPT_RESERVED1); + bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || + (qp_attr_mask & IB_QP_ALT_PATH)); + + if (pps_change && !special_qp) { + mutex_lock(&qp->qp_sec->mutex); + new_pps = get_new_pps(qp, + qp_attr, + qp_attr_mask); + + /* Add this QP to the lists for the new port + * and pkey settings before checking for permission + * in case there is a concurrent cache update + * occurring. Walking the list for a cache change + * doesn't acquire the security mutex unless it's + * sending the QP to error. + */ + ret = port_pkey_list_insert(&new_pps->main); + + if (!ret) + ret = port_pkey_list_insert(&new_pps->alt); + + if (!ret) + ret = check_qp_port_pkey_settings(new_pps, + qp->qp_sec); + } + + if (!ret) + ret = qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); + + if (pps_change && !special_qp) { + /* Clean up the lists and free the appropriate + * ports_pkeys structure. 
+ */ + if (ret) { + tmp_pps = new_pps; + } else { + tmp_pps = qp->qp_sec->ports_pkeys; + qp->qp_sec->ports_pkeys = new_pps; + } + + if (tmp_pps) { + port_pkey_list_remove(&tmp_pps->main); + port_pkey_list_remove(&tmp_pps->alt); + } + kfree(tmp_pps); + mutex_unlock(&qp->qp_sec->mutex); + } + return ret; +} +EXPORT_SYMBOL(ib_security_modify_qp); + +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret; + + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix); + + if (ret) + return ret; + + return security_ib_pkey_access(sec, subnet_prefix, pkey); +} +EXPORT_SYMBOL(ib_security_pkey_access); + +static int ib_mad_agent_security_change(struct notifier_block *nb, + unsigned long event, + void *data) +{ + struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb); + + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security, + ag->device->name, + ag->port_num); + + return NOTIFY_OK; +} + +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + int ret; + + ret = security_ib_alloc_security(&agent->security); + if (ret) + return ret; + + if (qp_type != IB_QPT_SMI) + return 0; + + ret = security_ib_endport_manage_subnet(agent->security, + agent->device->name, + agent->port_num); + if (ret) + return ret; + + agent->lsm_nb.notifier_call = ib_mad_agent_security_change; + ret = register_lsm_notifier(&agent->lsm_nb); + if (ret) + return ret; + + agent->smp_allowed = true; + agent->lsm_nb_reg = true; + return 0; +} + +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ + security_ib_free_security(agent->security); + if (agent->lsm_nb_reg) + unregister_lsm_notifier(&agent->lsm_nb); +} + +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) +{ + int ret; + + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) + return -EACCES; + + ret = ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); + + if (ret) + return ret; + + return 0; +} + +#endif /* CONFIG_SECURITY_INFINIBAND */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 70b7fb156414..0ad3b05405d8 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1508,6 +1508,10 @@ static int create_qp(struct ib_uverbs_file *file, } if (cmd->qp_type != IB_QPT_XRC_TGT) { + ret = ib_create_qp_security(qp, device); + if (ret) + goto err_cb; + qp->real_qp = qp; qp->device = device; qp->pd = pd; @@ -2002,14 +2006,17 @@ static int modify_qp(struct ib_uverbs_file *file, if (ret) goto release_qp; } - ret = qp->device->modify_qp(qp, attr, + ret = ib_security_modify_qp(qp, + attr, modify_qp_mask(qp->qp_type, cmd->base.attr_mask), udata); } else { - ret = ib_modify_qp(qp, attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask)); + ret = ib_security_modify_qp(qp, + attr, + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask), + NULL); } release_qp: diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 4792f5209ac2..c973a83c898b 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -44,6 +44,7 @@ #include <linux/in.h> #include <linux/in6.h> #include <net/addrconf.h> +#include <linux/security.h> #include <rdma/ib_verbs.h> 
#include <rdma/ib_cache.h> @@ -713,12 +714,20 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, { struct ib_qp *qp; unsigned long flags; + int err; qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->real_qp = real_qp; + err = ib_open_shared_qp_security(qp, real_qp->device); + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + qp->real_qp = real_qp; atomic_inc(&real_qp->usecnt); qp->device = real_qp->device; qp->event_handler = event_handler; @@ -804,6 +813,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, if (IS_ERR(qp)) return qp; + ret = ib_create_qp_security(qp, device); + if (ret) { + ib_destroy_qp(qp); + return ERR_PTR(ret); + } + qp->device = device; qp->real_qp = qp; qp->uobject = NULL; @@ -1266,7 +1281,7 @@ int ib_modify_qp(struct ib_qp *qp, return ret; } - return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); + return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); @@ -1295,6 +1310,7 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; @@ -1335,6 +1351,7 @@ int ib_destroy_qp(struct ib_qp *qp) struct ib_cq *scq, *rcq; struct ib_srq *srq; struct ib_rwq_ind_table *ind_tbl; + struct ib_qp_security *sec; int ret; WARN_ON_ONCE(qp->mrs_used > 0); @@ -1350,6 +1367,9 @@ int ib_destroy_qp(struct ib_qp *qp) rcq = qp->recv_cq; srq = qp->srq; ind_tbl = qp->rwq_ind_tbl; + sec = qp->qp_sec; + if (sec) + ib_destroy_qp_security_begin(sec); if (!qp->uobject) rdma_rw_cleanup_mrs(qp); @@ -1366,6 +1386,11 @@ int ib_destroy_qp(struct ib_qp *qp) atomic_dec(&srq->usecnt); if (ind_tbl) atomic_dec(&ind_tbl->usecnt); + if (sec) + ib_destroy_qp_security_end(sec); + } else { + if (sec) + ib_destroy_qp_security_abort(sec); } return ret; |
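Editor's note: the tpm_i2c_infineon.c hunks above split large reads into adapter-sized chunks. Stripped of the register-address write, retries, and sleeps that the real loop also performs, the core chunking and fallback logic looks roughly like this (an illustrative sketch under those simplifications; the function and its parameters are hypothetical):

/* Illustration only: read 'len' bytes via a prepared I2C read message,
 * in chunks no larger than the adapter's known limit, dropping to
 * I2C_SMBUS_BLOCK_MAX once the adapter rejects a transfer.
 */
static int example_chunked_read(struct i2c_adapter *adap,
				struct i2c_msg *msg, u8 *buf, size_t len,
				unsigned int *limit)
{
	while (len > 0) {
		unsigned int chunk = *limit ?
			min_t(unsigned int, *limit, len) : len;
		int rc;

		msg->buf = buf;
		msg->len = chunk;
		rc = __i2c_transfer(adap, msg, 1);
		if (rc == -EOPNOTSUPP) {
			/* Quirked adapter (max_read_len < len): fall
			 * back to a minimum every adapter supports. */
			*limit = I2C_SMBUS_BLOCK_MAX;
			continue;
		}
		if (rc <= 0)
			return rc;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}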
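Editor's note: the new security.c hooks are easiest to read as a lifecycle around the existing verbs. A condensed, illustrative sketch of the ordering the verbs.c and uverbs_cmd.c hunks above now impose (the wrapper function and attribute values are hypothetical; the hook names and their ordering are the ones added in this diff):

/* Illustration only: where the new QP security hooks sit relative to
 * the standard verbs, per the verbs.c changes above.
 */
static int example_qp_security_lifecycle(struct ib_pd *pd,
					 struct ib_qp_init_attr *init)
{
	/* ib_create_qp() now calls ib_create_qp_security(), allocating
	 * qp->qp_sec and its LSM blob before the QP is usable. */
	struct ib_qp *qp = ib_create_qp(pd, init);
	struct ib_qp_attr attr = {
		.qp_state   = IB_QPS_INIT,
		.pkey_index = 0,	/* checked against LSM policy */
		.port_num   = 1,
	};
	int ret;

	if (IS_ERR(qp))
		return PTR_ERR(qp);

	/* ib_modify_qp() routes through ib_security_modify_qp(): the
	 * new port/pkey pair is listed on the device's pkey lists
	 * first, then checked via security_ib_pkey_access(), and only
	 * then handed to device->modify_qp(). On failure the new
	 * listing is removed again. */
	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
			   IB_QP_ACCESS_FLAGS);

	/* ib_destroy_qp() brackets the hardware teardown:
	 * _begin() delists the QP and marks it destroying, _end()
	 * waits out any concurrent cache-change error flow and frees
	 * qp_sec, and _abort() restores the listings on failure. */
	ib_destroy_qp(qp);
	return ret;
}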