Message ID | 20250317092946.265146293@linutronix.de |
---|---|
State | New |
Series | genirq/msi: Spring cleaning |
On Mon, 2025-03-17 at 14:29 +0100, Thomas Gleixner wrote:
> The driver abuses the MSI descriptors for internal purposes. Aside of
> core code and MSI providers nothing has to care about their
> existence. They have been encapsulated with a lot of effort because
> this kind of abuse caused all sorts of issues including a
> maintainability nightmare.
>
> Rewrite the code so it uses dedicated storage to hand the required
> information to the interrupt handler.
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
> Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
> Cc: linux-scsi@vger.kernel.org
>
> ---
>  drivers/ufs/host/ufs-qcom.c | 77 ++++++++++++++++++++++----------------------
>  1 file changed, 40 insertions(+), 37 deletions(-)
>
> --- a/drivers/ufs/host/ufs-qcom.c
> +++ b/drivers/ufs/host/ufs-qcom.c
> @@ -1782,15 +1782,19 @@ static void ufs_qcom_write_msi_msg(struc
>          ufshcd_mcq_config_esi(hba, msg);
>  }
>
> +struct ufs_qcom_irq {
> +        unsigned int irq;
> +        unsigned int idx;
> +        struct ufs_hba *hba;
> +};
> +
>  static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
>  {
> -        struct msi_desc *desc = data;
> -        struct device *dev = msi_desc_to_dev(desc);
> -        struct ufs_hba *hba = dev_get_drvdata(dev);
> -        u32 id = desc->msi_index;
> -        struct ufs_hw_queue *hwq = &hba->uhq[id];
> +        struct ufs_qcom_irq *qi = data;
> +        struct ufs_hba *hba = qi->hba;
> +        struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
>
> -        ufshcd_mcq_write_cqis(hba, 0x1, id);
> +        ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
>          ufshcd_mcq_poll_cqe_lock(hba, hwq);
>
>          return IRQ_HANDLED;
> @@ -1799,8 +1803,7 @@ static irqreturn_t ufs_qcom_mcq_esi_hand
>  static int ufs_qcom_config_esi(struct ufs_hba *hba)
>  {
>          struct ufs_qcom_host *host = ufshcd_get_variant(hba);
> -        struct msi_desc *desc;
> -        struct msi_desc *failed_desc = NULL;
> +        struct ufs_qcom_irq *qi;
>          int nr_irqs, ret;
>
>          if (host->esi_enabled)
> @@ -1811,47 +1814,47 @@ static int ufs_qcom_config_esi(struct uf
>           * 2. Poll queues do not need ESI.
>           */
>          nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
> +        qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
> +        if (qi)

Typo causing logic inversion: should be !qi here (you need a more
responsive ! key).

> +                return -ENOMEM;
> +
>          ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
>                                                        ufs_qcom_write_msi_msg);
>          if (ret) {
>                  dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
> -                return ret;
> +                goto cleanup;
>          }
>
> -        msi_lock_descs(hba->dev);
> -        msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
> -                ret = devm_request_irq(hba->dev, desc->irq,
> -                                       ufs_qcom_mcq_esi_handler,
> -                                       IRQF_SHARED, "qcom-mcq-esi", desc);
> +        for (int idx = 0; idx < nr_irqs; idx++) {
> +                qi[idx].irq = msi_get_virq(hba->dev, idx);
> +                qi[idx].idx = idx;
> +                qi[idx].hba = hba;
> +
> +                ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
> +                                       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
>                  if (ret) {
>                          dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
> -                                __func__, desc->irq, ret);
> -                        failed_desc = desc;
> -                        break;
> +                                __func__, qi[idx].irq, ret);
> +                        qi[idx].irq = 0;
> +                        goto cleanup;
>                  }
>          }
> -        msi_unlock_descs(hba->dev);
>
> -        if (ret) {
> -                /* Rewind */
> -                msi_lock_descs(hba->dev);
> -                msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
> -                        if (desc == failed_desc)
> -                                break;
> -                        devm_free_irq(hba->dev, desc->irq, hba);
> -                }
> -                msi_unlock_descs(hba->dev);
> -                platform_device_msi_free_irqs_all(hba->dev);
> -        } else {
> -                if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
> -                    host->hw_ver.step == 0)
> -                        ufshcd_rmwl(hba, ESI_VEC_MASK,
> -                                    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
> -                                    REG_UFS_CFG3);
> -                ufshcd_mcq_enable_esi(hba);
> -                host->esi_enabled = true;
> +        if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
> +            host->hw_ver.step == 0) {
> +                ufshcd_rmwl(hba, ESI_VEC_MASK,
> +                            FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
> +                            REG_UFS_CFG3);
>          }
> -
> +        ufshcd_mcq_enable_esi(hba);
> +        host->esi_enabled = true;
> +        return 0;
> +
> +cleanup:
> +        for (int idx = 0; qi[idx].irq; idx++)
> +                devm_free_irq(hba->dev, qi[idx].irq, hba);
> +        platform_device_msi_free_irqs_all(hba->dev);
> +        devm_kfree(hba->dev, qi);
>          return ret;
>  }

This does seem to be exactly the pattern you describe in 1/10, although
I'm not entirely convinced that something like the below is more
readable and safe.
Regards,

James

---
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..26b0c665c3b7 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1782,25 +1782,37 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
         ufshcd_mcq_config_esi(hba, msg);
 }
 
+struct ufs_qcom_irq {
+        unsigned int irq;
+        unsigned int idx;
+        struct ufs_hba *hba;
+};
+
 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 {
-        struct msi_desc *desc = data;
-        struct device *dev = msi_desc_to_dev(desc);
-        struct ufs_hba *hba = dev_get_drvdata(dev);
-        u32 id = desc->msi_index;
-        struct ufs_hw_queue *hwq = &hba->uhq[id];
+        struct ufs_qcom_irq *qi = data;
+        struct ufs_hba *hba = qi->hba;
+        struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
 
-        ufshcd_mcq_write_cqis(hba, 0x1, id);
+        ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
         ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
         return IRQ_HANDLED;
 }
 
+DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *,
+            if (_T) {                                           \
+                    for (int idx = 0; _T[idx].irq; idx++)       \
+                            devm_free_irq(_T[idx].hba->dev, _T[idx].irq, \
+                                          _T[idx].hba);         \
+                    platform_device_msi_free_irqs_all(_T[0].hba->dev); \
+                    devm_kfree(_T[0].hba->dev, _T);             \
+            })
+
 static int ufs_qcom_config_esi(struct ufs_hba *hba)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-        struct msi_desc *desc;
-        struct msi_desc *failed_desc = NULL;
+        struct ufs_qcom_irq *qi __free(ufs_qcom_irq);
         int nr_irqs, ret;
 
         if (host->esi_enabled)
@@ -1811,6 +1823,11 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
          * 2. Poll queues do not need ESI.
          */
         nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+        qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+        if (!qi)
+                return -ENOMEM;
+        qi[0].hba = hba; /* required by __free() */
+
         ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
                                                       ufs_qcom_write_msi_msg);
         if (ret) {
@@ -1818,41 +1835,31 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
                 return ret;
         }
 
-        msi_lock_descs(hba->dev);
-        msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-                ret = devm_request_irq(hba->dev, desc->irq,
-                                       ufs_qcom_mcq_esi_handler,
-                                       IRQF_SHARED, "qcom-mcq-esi", desc);
+        for (int idx = 0; idx < nr_irqs; idx++) {
+                qi[idx].irq = msi_get_virq(hba->dev, idx);
+                qi[idx].idx = idx;
+                qi[idx].hba = hba;
+
+                ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+                                       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
                 if (ret) {
                         dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
-                                __func__, desc->irq, ret);
-                        failed_desc = desc;
-                        break;
+                                __func__, qi[idx].irq, ret);
+                        qi[idx].irq = 0;
+                        return ret;
                 }
         }
-        msi_unlock_descs(hba->dev);
+        retain_ptr(qi);
 
-        if (ret) {
-                /* Rewind */
-                msi_lock_descs(hba->dev);
-                msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-                        if (desc == failed_desc)
-                                break;
-                        devm_free_irq(hba->dev, desc->irq, hba);
-                }
-                msi_unlock_descs(hba->dev);
-                platform_device_msi_free_irqs_all(hba->dev);
-        } else {
-                if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-                    host->hw_ver.step == 0)
-                        ufshcd_rmwl(hba, ESI_VEC_MASK,
-                                    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
-                                    REG_UFS_CFG3);
-                ufshcd_mcq_enable_esi(hba);
-                host->esi_enabled = true;
+        if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+            host->hw_ver.step == 0) {
+                ufshcd_rmwl(hba, ESI_VEC_MASK,
+                            FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+                            REG_UFS_CFG3);
         }
-
-        return ret;
+        ufshcd_mcq_enable_esi(hba);
+        host->esi_enabled = true;
+        return 0;
 }
 
 /*
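For readers who have not met them yet: DEFINE_FREE() and __free() come from
<linux/cleanup.h> and implement scope-based cleanup, and the "disarm" step that
James spells retain_ptr() appears to serve the same purpose as the
no_free_ptr()/return_ptr() helpers in that header. A minimal, self-contained
sketch of the pattern follows; foo_ctx and foo_create() are made-up names for
illustration, not anything from the patch:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct foo_ctx {
        int nr;
};

/* Run kfree() automatically when a __free(foo_ctx) pointer leaves scope. */
DEFINE_FREE(foo_ctx, struct foo_ctx *, if (_T) kfree(_T))

static struct foo_ctx *foo_create(int nr)
{
        struct foo_ctx *ctx __free(foo_ctx) = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return NULL;

        ctx->nr = nr;
        /* Any early 'return NULL;' above this point frees ctx automatically. */

        /* Success: hand ownership to the caller and disarm the cleanup. */
        return_ptr(ctx);
}

The appeal is that every early return after the allocation unwinds
automatically; the cost, as discussed in this thread, is that the actual
teardown logic ends up inside a macro body.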
On Mon, Mar 17 2025 at 12:26, James Bottomley wrote:
>>          nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
>> +        qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
>> +        if (qi)
>
> Typo causing logic inversion: should be !qi here (you need a more
> responsive ! key).

Duh.

>> +cleanup:
>> +        for (int idx = 0; qi[idx].irq; idx++)
>> +                devm_free_irq(hba->dev, qi[idx].irq, hba);
>> +        platform_device_msi_free_irqs_all(hba->dev);
>> +        devm_kfree(hba->dev, qi);
>>          return ret;
>>  }
>
> This does seem to be exactly the pattern you describe in 1/10, although

True.

> I'm not entirely convinced that something like the below is more
> readable and safe.

At least the cleanup wants to be in a C function and not hideously
hidden in the DEFINE_FREE() macro. Let me respin that.

Thanks,

        tglx
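For context, a sketch of what such a respin could look like: the teardown lives
in a named helper and DEFINE_FREE() reduces to a one-liner that calls it. The
helper name ufs_qcom_irq_free() and the dev_id handling are illustrative
assumptions, not the actual follow-up patch:

/*
 * Tear down a partially or fully initialised ufs_qcom_irq array.
 * Assumes the array came from devm_kcalloc() (so unused slots have
 * irq == 0) and that qi[0].hba is set before the first early exit.
 */
static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
{
        for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
                /* dev_id must match the cookie given to devm_request_irq() */
                devm_free_irq(q->hba->dev, q->irq, q);

        platform_device_msi_free_irqs_all(uqi->hba->dev);
        devm_kfree(uqi->hba->dev, uqi);
}

DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))

ufs_qcom_config_esi() could then declare qi with __free(ufs_qcom_irq), return
early on any failure, and disarm the cleanup only once everything has
succeeded.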
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1782,15 +1782,19 @@ static void ufs_qcom_write_msi_msg(struc
         ufshcd_mcq_config_esi(hba, msg);
 }
 
+struct ufs_qcom_irq {
+        unsigned int irq;
+        unsigned int idx;
+        struct ufs_hba *hba;
+};
+
 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 {
-        struct msi_desc *desc = data;
-        struct device *dev = msi_desc_to_dev(desc);
-        struct ufs_hba *hba = dev_get_drvdata(dev);
-        u32 id = desc->msi_index;
-        struct ufs_hw_queue *hwq = &hba->uhq[id];
+        struct ufs_qcom_irq *qi = data;
+        struct ufs_hba *hba = qi->hba;
+        struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
 
-        ufshcd_mcq_write_cqis(hba, 0x1, id);
+        ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
         ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
         return IRQ_HANDLED;
@@ -1799,8 +1803,7 @@ static irqreturn_t ufs_qcom_mcq_esi_hand
 static int ufs_qcom_config_esi(struct ufs_hba *hba)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-        struct msi_desc *desc;
-        struct msi_desc *failed_desc = NULL;
+        struct ufs_qcom_irq *qi;
         int nr_irqs, ret;
 
         if (host->esi_enabled)
@@ -1811,47 +1814,47 @@ static int ufs_qcom_config_esi(struct uf
          * 2. Poll queues do not need ESI.
          */
         nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+        qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+        if (qi)
+                return -ENOMEM;
+
         ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
                                                       ufs_qcom_write_msi_msg);
         if (ret) {
                 dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
-                return ret;
+                goto cleanup;
         }
 
-        msi_lock_descs(hba->dev);
-        msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-                ret = devm_request_irq(hba->dev, desc->irq,
-                                       ufs_qcom_mcq_esi_handler,
-                                       IRQF_SHARED, "qcom-mcq-esi", desc);
+        for (int idx = 0; idx < nr_irqs; idx++) {
+                qi[idx].irq = msi_get_virq(hba->dev, idx);
+                qi[idx].idx = idx;
+                qi[idx].hba = hba;
+
+                ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+                                       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
                 if (ret) {
                         dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
-                                __func__, desc->irq, ret);
-                        failed_desc = desc;
-                        break;
+                                __func__, qi[idx].irq, ret);
+                        qi[idx].irq = 0;
+                        goto cleanup;
                 }
         }
-        msi_unlock_descs(hba->dev);
 
-        if (ret) {
-                /* Rewind */
-                msi_lock_descs(hba->dev);
-                msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-                        if (desc == failed_desc)
-                                break;
-                        devm_free_irq(hba->dev, desc->irq, hba);
-                }
-                msi_unlock_descs(hba->dev);
-                platform_device_msi_free_irqs_all(hba->dev);
-        } else {
-                if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-                    host->hw_ver.step == 0)
-                        ufshcd_rmwl(hba, ESI_VEC_MASK,
-                                    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
-                                    REG_UFS_CFG3);
-                ufshcd_mcq_enable_esi(hba);
-                host->esi_enabled = true;
+        if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+            host->hw_ver.step == 0) {
+                ufshcd_rmwl(hba, ESI_VEC_MASK,
+                            FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+                            REG_UFS_CFG3);
         }
-
+        ufshcd_mcq_enable_esi(hba);
+        host->esi_enabled = true;
+        return 0;
+
+cleanup:
+        for (int idx = 0; qi[idx].irq; idx++)
+                devm_free_irq(hba->dev, qi[idx].irq, hba);
+        platform_device_msi_free_irqs_all(hba->dev);
+        devm_kfree(hba->dev, qi);
         return ret;
 }
The driver abuses the MSI descriptors for internal purposes. Aside of
core code and MSI providers nothing has to care about their existence.
They have been encapsulated with a lot of effort because this kind of
abuse caused all sorts of issues including a maintainability nightmare.

Rewrite the code so it uses dedicated storage to hand the required
information to the interrupt handler.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: linux-scsi@vger.kernel.org
---
 drivers/ufs/host/ufs-qcom.c | 77 ++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 37 deletions(-)