Message ID | 20240109114220.30243-4-quic_bibekkum@quicinc.com |
---|---|
State | Superseded |
Series | iommu/arm-smmu: introduction of ACTLR implementation for Qualcomm SoCs |
On Tue, Jan 09, 2024 at 05:12:18PM +0530, Bibek Kumar Patro wrote: > static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, > struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) > { > + struct arm_smmu_device *smmu = smmu_domain->smmu; > + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); > + const struct actlr_variant *actlrvar; > + int cbndx = smmu_domain->cfg.cbndx; > struct adreno_smmu_priv *priv; > > smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; > @@ -248,6 +285,16 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, > priv->set_stall = qcom_adreno_smmu_set_stall; > priv->resume_translation = qcom_adreno_smmu_resume_translation; > > + if (qsmmu->data->actlrvar) { > + actlrvar = qsmmu->data->actlrvar; > + for (; actlrvar->io_start; actlrvar++) { > + if (actlrvar->io_start == smmu->ioaddr) { > + qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); > + break; > + } > + } > + } > + > return 0; > } > > @@ -274,6 +321,21 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { > static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, > struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) > { > + struct arm_smmu_device *smmu = smmu_domain->smmu; > + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); > + const struct actlr_variant *actlrvar; > + int cbndx = smmu_domain->cfg.cbndx; > + > + if (qsmmu->data->actlrvar) { > + actlrvar = qsmmu->data->actlrvar; > + for (; actlrvar->io_start; actlrvar++) { > + if (actlrvar->io_start == smmu->ioaddr) { > + qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); > + break; > + } > + } > + } > + This block and the one in qcom_adreno_smmu_init_context() are exactly the same. Possible to do some refactoring? > smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; > > return 0; > diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h > index f3b91963e234..29d26dfa2ed9 100644 > --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h > +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h > @@ -1,6 +1,6 @@ > /* SPDX-License-Identifier: GPL-2.0-only */ > /* > - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. > + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. > */ > It should be 2022-2023 . > #ifndef _ARM_SMMU_QCOM_H > @@ -24,8 +24,17 @@ struct qcom_smmu_config { > const u32 *reg_offset; > }; > > +struct actlr_config; > + > +struct actlr_variant { > + const struct actlr_config *actlrcfg; > + const resource_size_t io_start; > +}; > + > struct qcom_smmu_match_data { > + const struct actlr_variant *actlrvar; > const struct qcom_smmu_config *cfg; > + const int num_smmu; > const struct arm_smmu_impl *impl; > const struct arm_smmu_impl *adreno_impl; > }; qcom_smmu_match_data::num_smmu needs cleanup. Thanks, Pavan
On 1/10/2024 9:36 AM, Pavan Kondeti wrote: > On Tue, Jan 09, 2024 at 05:12:18PM +0530, Bibek Kumar Patro wrote: >> static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, >> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >> { >> + struct arm_smmu_device *smmu = smmu_domain->smmu; >> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >> + const struct actlr_variant *actlrvar; >> + int cbndx = smmu_domain->cfg.cbndx; >> struct adreno_smmu_priv *priv; >> >> smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; >> @@ -248,6 +285,16 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, >> priv->set_stall = qcom_adreno_smmu_set_stall; >> priv->resume_translation = qcom_adreno_smmu_resume_translation; >> >> + if (qsmmu->data->actlrvar) { >> + actlrvar = qsmmu->data->actlrvar; >> + for (; actlrvar->io_start; actlrvar++) { >> + if (actlrvar->io_start == smmu->ioaddr) { >> + qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >> + break; >> + } >> + } >> + } >> + >> return 0; >> } >> >> @@ -274,6 +321,21 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { >> static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, >> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >> { >> + struct arm_smmu_device *smmu = smmu_domain->smmu; >> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >> + const struct actlr_variant *actlrvar; >> + int cbndx = smmu_domain->cfg.cbndx; >> + >> + if (qsmmu->data->actlrvar) { >> + actlrvar = qsmmu->data->actlrvar; >> + for (; actlrvar->io_start; actlrvar++) { >> + if (actlrvar->io_start == smmu->ioaddr) { >> + qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >> + break; >> + } >> + } >> + } >> + > > This block and the one in qcom_adreno_smmu_init_context() are exactly > the same. Possible to do some refactoring? > I will check if this repeated blocks can be accomodated this into qcom_smmu_set_actlr function if that would be fine. >> smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; >> >> return 0; >> diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h >> index f3b91963e234..29d26dfa2ed9 100644 >> --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h >> +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h >> @@ -1,6 +1,6 @@ >> /* SPDX-License-Identifier: GPL-2.0-only */ >> /* >> - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. >> + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. >> */ >> > > It should be 2022-2023 . > Ack >> #ifndef _ARM_SMMU_QCOM_H >> @@ -24,8 +24,17 @@ struct qcom_smmu_config { >> const u32 *reg_offset; >> }; >> >> +struct actlr_config; >> + >> +struct actlr_variant { >> + const struct actlr_config *actlrcfg; >> + const resource_size_t io_start; >> +}; >> + >> struct qcom_smmu_match_data { >> + const struct actlr_variant *actlrvar; >> const struct qcom_smmu_config *cfg; >> + const int num_smmu; >> const struct arm_smmu_impl *impl; >> const struct arm_smmu_impl *adreno_impl; >> }; > > qcom_smmu_match_data::num_smmu needs cleanup. > Ack, thanks for pointing this out. Thanks & regards, Bibek > Thanks, > Pavan
On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: > > > On 1/10/2024 9:36 AM, Pavan Kondeti wrote: [...] >>> @@ -274,6 +321,21 @@ static const struct of_device_id >>> qcom_smmu_client_of_match[] __maybe_unused = { >>> static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, >>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >>> { >>> + struct arm_smmu_device *smmu = smmu_domain->smmu; >>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >>> + const struct actlr_variant *actlrvar; >>> + int cbndx = smmu_domain->cfg.cbndx; >>> + >>> + if (qsmmu->data->actlrvar) { >>> + actlrvar = qsmmu->data->actlrvar; >>> + for (; actlrvar->io_start; actlrvar++) { >>> + if (actlrvar->io_start == smmu->ioaddr) { >>> + qcom_smmu_set_actlr(dev, smmu, cbndx, >>> actlrvar->actlrcfg); >>> + break; >>> + } >>> + } >>> + } >>> + >> >> This block and the one in qcom_adreno_smmu_init_context() are exactly >> the same. Possible to do some refactoring? >> > > I will check if this repeated blocks can be accomodated this into > qcom_smmu_set_actlr function if that would be fine. > Also adding to this, this might increase the number of indentation inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this be an issue? Thanks, Bibek
On 1/10/24 13:55, Bibek Kumar Patro wrote: > > > On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: >> >> >> On 1/10/2024 9:36 AM, Pavan Kondeti wrote: > > [...] > >>>> @@ -274,6 +321,21 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { >>>> static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, >>>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >>>> { >>>> + struct arm_smmu_device *smmu = smmu_domain->smmu; >>>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >>>> + const struct actlr_variant *actlrvar; >>>> + int cbndx = smmu_domain->cfg.cbndx; >>>> + >>>> + if (qsmmu->data->actlrvar) { >>>> + actlrvar = qsmmu->data->actlrvar; >>>> + for (; actlrvar->io_start; actlrvar++) { >>>> + if (actlrvar->io_start == smmu->ioaddr) { >>>> + qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >>>> + break; >>>> + } >>>> + } >>>> + } >>>> + >>> >>> This block and the one in qcom_adreno_smmu_init_context() are exactly >>> the same. Possible to do some refactoring? >>> >> >> I will check if this repeated blocks can be accomodated this into qcom_smmu_set_actlr function if that would be fine. >> > > Also adding to this, this might increase the number of indentation inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this > be an issue? By the way, we can refactor this: if (qsmmu->data->actlrvar) { actlrvar = qsmmu->data->actlrvar; for (; actlrvar->io_start; actlrvar++) { if (actlrvar->io_start == smmu->ioaddr) { qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); break; } } } into // add const u8 num_actlrcfgs to struct actrl_variant to // save on sentinel space: // sizeof(u8) < sizeof(ptr) + sizeof(resource_size_t) [declarations] const struct actlr_variant *actlrvar = qsmmu->data->actlrvar; int i; [rest of the functions] if (!actlrvar) return 0; for (i = 0; i < actrlvar->num_actrlcfgs; i++) { if (actlrvar[i].io_start == smmu->ioaddr) { qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); break; } } Saving both on .TEXT size and indentation levels :) Konrad
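For illustration, the duplicated block could be folded into a single helper along these lines. This is only a sketch: the helper name qcom_smmu_apply_actlr is made up here, and it assumes the per-SoC entry counts (num_smmu / num_actlrcfg) suggested above rather than the sentinel-terminated table from the posted patch.

/* Sketch only: assumes qcom_smmu_match_data carries the number of SMMU instances. */
static void qcom_smmu_apply_actlr(struct device *dev, struct arm_smmu_device *smmu,
				  int cbndx)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	const struct actlr_variant *actlrvar = qsmmu->data->actlrvar;
	int i;

	if (!actlrvar)
		return;

	/* Pick the entry for this SMMU instance by its MMIO base address. */
	for (i = 0; i < qsmmu->data->num_smmu; i++) {
		if (actlrvar[i].io_start == smmu->ioaddr) {
			qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar[i].actlrcfg);
			break;
		}
	}
}

Both qcom_smmu_init_context() and qcom_adreno_smmu_init_context() could then just call qcom_smmu_apply_actlr(dev, smmu, cbndx) instead of open-coding the loop, which also leaves the indentation inside qcom_smmu_set_actlr() untouched.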
On 1/10/2024 11:26 PM, Konrad Dybcio wrote: > > > On 1/10/24 13:55, Bibek Kumar Patro wrote: >> >> >> On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: >>> >>> >>> On 1/10/2024 9:36 AM, Pavan Kondeti wrote: >> >> [...] >> >>>>> @@ -274,6 +321,21 @@ static const struct of_device_id >>>>> qcom_smmu_client_of_match[] __maybe_unused = { >>>>> static int qcom_smmu_init_context(struct arm_smmu_domain >>>>> *smmu_domain, >>>>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >>>>> { >>>>> + struct arm_smmu_device *smmu = smmu_domain->smmu; >>>>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >>>>> + const struct actlr_variant *actlrvar; >>>>> + int cbndx = smmu_domain->cfg.cbndx; >>>>> + >>>>> + if (qsmmu->data->actlrvar) { >>>>> + actlrvar = qsmmu->data->actlrvar; >>>>> + for (; actlrvar->io_start; actlrvar++) { >>>>> + if (actlrvar->io_start == smmu->ioaddr) { >>>>> + qcom_smmu_set_actlr(dev, smmu, cbndx, >>>>> actlrvar->actlrcfg); >>>>> + break; >>>>> + } >>>>> + } >>>>> + } >>>>> + >>>> >>>> This block and the one in qcom_adreno_smmu_init_context() are exactly >>>> the same. Possible to do some refactoring? >>>> >>> >>> I will check if this repeated blocks can be accomodated this into >>> qcom_smmu_set_actlr function if that would be fine. >>> >> >> Also adding to this, this might increase the number of indentation >> inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this >> be an issue? > > By the way, we can refactor this: > > if (qsmmu->data->actlrvar) { > actlrvar = qsmmu->data->actlrvar; > for (; actlrvar->io_start; actlrvar++) { > if (actlrvar->io_start == smmu->ioaddr) { > qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); > break; > } > } > } > > into > > // add const u8 num_actlrcfgs to struct actrl_variant to > // save on sentinel space: > // sizeof(u8) < sizeof(ptr) + sizeof(resource_size_t) > Git it, Would it be better to add this in struct qcom_smmu_match_data ? Posted a sample below. > > [declarations] > const struct actlr_variant *actlrvar = qsmmu->data->actlrvar; > int i; > > [rest of the functions] > > if (!actlrvar) > return 0; > > for (i = 0; i < actrlvar->num_actrlcfgs; i++) { > if (actlrvar[i].io_start == smmu->ioaddr) { > qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); > break; > } > } > > Saving both on .TEXT size and indentation levels :) > Thanks for this suggestion Konrad, will try to implement this, as it would reduce the indent levels to good extent. Would something like this be okay? static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); const struct actlr_variant *actlrvar; int cbndx = smmu_domain->cfg.cbndx; + int i; + actlrvar = qsmmu->data->actlrvar; + + if (!actlrvar) + goto end; + + for (i = 0; i < qsmmu->data->num_smmu ; i++) { + if (actlrvar[i].io_start == smmu->ioaddr) { + qcom_smmu_set_actlr(dev, smmu, cbndx, + actlrvar[i].actlrcfg); + break; } } +end: smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; return 0; Thanks & regards, Bibek > Konrad
On 1/11/24 19:09, Bibek Kumar Patro wrote: > > > On 1/10/2024 11:26 PM, Konrad Dybcio wrote: >> >> >> On 1/10/24 13:55, Bibek Kumar Patro wrote: >>> >>> >>> On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: >>>> >>>> >>>> On 1/10/2024 9:36 AM, Pavan Kondeti wrote: >>> >>> [...] >>> >>>>>> @@ -274,6 +321,21 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { >>>>>> static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, >>>>>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >>>>>> { >>>>>> + struct arm_smmu_device *smmu = smmu_domain->smmu; >>>>>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >>>>>> + const struct actlr_variant *actlrvar; >>>>>> + int cbndx = smmu_domain->cfg.cbndx; >>>>>> + >>>>>> + if (qsmmu->data->actlrvar) { >>>>>> + actlrvar = qsmmu->data->actlrvar; >>>>>> + for (; actlrvar->io_start; actlrvar++) { >>>>>> + if (actlrvar->io_start == smmu->ioaddr) { >>>>>> + qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >>>>>> + break; >>>>>> + } >>>>>> + } >>>>>> + } >>>>>> + >>>>> >>>>> This block and the one in qcom_adreno_smmu_init_context() are exactly >>>>> the same. Possible to do some refactoring? >>>>> >>>> >>>> I will check if this repeated blocks can be accomodated this into qcom_smmu_set_actlr function if that would be fine. >>>> >>> >>> Also adding to this, this might increase the number of indentation inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this >>> be an issue? >> >> By the way, we can refactor this: >> >> if (qsmmu->data->actlrvar) { >> actlrvar = qsmmu->data->actlrvar; >> for (; actlrvar->io_start; actlrvar++) { >> if (actlrvar->io_start == smmu->ioaddr) { >> qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >> break; >> } >> } >> } >> >> into >> >> // add const u8 num_actlrcfgs to struct actrl_variant to >> // save on sentinel space: >> // sizeof(u8) < sizeof(ptr) + sizeof(resource_size_t) >> > > Git it, Would it be better to add this in struct qcom_smmu_match_data ? Yes, right. > Posted a sample below. > >> >> [declarations] >> const struct actlr_variant *actlrvar = qsmmu->data->actlrvar; >> int i; >> >> [rest of the functions] >> >> if (!actlrvar) >> return 0; >> > for (i = 0; i < actrlvar->num_actrlcfgs; i++) { >> if (actlrvar[i].io_start == smmu->ioaddr) { >> qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >> break; >> } >> } >> > Saving both on .TEXT size and indentation levels :) >> > Thanks for this suggestion Konrad, will try to implement this, as it would reduce the indent levels to good extent. > Would something like this be okay? > > static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, > struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); > const struct actlr_variant *actlrvar; > int cbndx = smmu_domain->cfg.cbndx; > + int i; > > + actlrvar = qsmmu->data->actlrvar; > + > + if (!actlrvar) > + goto end; > + > + for (i = 0; i < qsmmu->data->num_smmu ; i++) { > + if (actlrvar[i].io_start == smmu->ioaddr) { > + qcom_smmu_set_actlr(dev, smmu, cbndx, > + actlrvar[i].actlrcfg); > + break; > } > } > > +end: > smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; If you move this assignment before the actlrvar checking (there's no dependency between them), you will get rid of the goto. I also noticed that qcom_smmu_match_data.actlrvar could likely be const struct actlr_variant * const (const pointer to a const resource), similarly for actlr_variant.actlrcfg Konrad
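On the const point, a rough sketch of what the declarations could look like; the num_actlrcfg and num_smmu fields are carried over from the earlier suggestion and are not part of the posted patch:

struct actlr_variant {
	const struct actlr_config * const actlrcfg;	/* const pointer to const entries */
	const resource_size_t io_start;
	const u8 num_actlrcfg;
};

struct qcom_smmu_match_data {
	const struct actlr_variant * const actlrvar;
	const struct qcom_smmu_config *cfg;
	const struct arm_smmu_impl *impl;
	const struct arm_smmu_impl *adreno_impl;
	const int num_smmu;
};

With smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; moved ahead of the actlrvar check as suggested, the function can simply return 0 early instead of jumping to a label.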
On 1/12/2024 3:31 PM, Konrad Dybcio wrote: > > > On 1/11/24 19:09, Bibek Kumar Patro wrote: >> >> >> On 1/10/2024 11:26 PM, Konrad Dybcio wrote: >>> >>> >>> On 1/10/24 13:55, Bibek Kumar Patro wrote: >>>> >>>> >>>> On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: >>>>> >>>>> >>>>> On 1/10/2024 9:36 AM, Pavan Kondeti wrote: >>>> >>>> [...] >>>> >>>>>>> @@ -274,6 +321,21 @@ static const struct of_device_id >>>>>>> qcom_smmu_client_of_match[] __maybe_unused = { >>>>>>> static int qcom_smmu_init_context(struct arm_smmu_domain >>>>>>> *smmu_domain, >>>>>>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >>>>>>> { >>>>>>> + struct arm_smmu_device *smmu = smmu_domain->smmu; >>>>>>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >>>>>>> + const struct actlr_variant *actlrvar; >>>>>>> + int cbndx = smmu_domain->cfg.cbndx; >>>>>>> + >>>>>>> + if (qsmmu->data->actlrvar) { >>>>>>> + actlrvar = qsmmu->data->actlrvar; >>>>>>> + for (; actlrvar->io_start; actlrvar++) { >>>>>>> + if (actlrvar->io_start == smmu->ioaddr) { >>>>>>> + qcom_smmu_set_actlr(dev, smmu, cbndx, >>>>>>> actlrvar->actlrcfg); >>>>>>> + break; >>>>>>> + } >>>>>>> + } >>>>>>> + } >>>>>>> + >>>>>> >>>>>> This block and the one in qcom_adreno_smmu_init_context() are exactly >>>>>> the same. Possible to do some refactoring? >>>>>> >>>>> >>>>> I will check if this repeated blocks can be accomodated this into >>>>> qcom_smmu_set_actlr function if that would be fine. >>>>> >>>> >>>> Also adding to this, this might increase the number of indentation >>>> inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this >>>> be an issue? >>> >>> By the way, we can refactor this: >>> >>> if (qsmmu->data->actlrvar) { >>> actlrvar = qsmmu->data->actlrvar; >>> for (; actlrvar->io_start; actlrvar++) { >>> if (actlrvar->io_start == smmu->ioaddr) { >>> qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >>> break; >>> } >>> } >>> } >>> >>> into >>> >>> // add const u8 num_actlrcfgs to struct actrl_variant to >>> // save on sentinel space: >>> // sizeof(u8) < sizeof(ptr) + sizeof(resource_size_t) >>> >> >> Git it, Would it be better to add this in struct qcom_smmu_match_data ? > > Yes, right. > Actually, I noticed now, we can do both the actlr_config (num_actlrcfg is used) and actlr_var (num_smmu is used) in the similar by storing the number of elements in each of them. something like this: +static const struct actlr_config sc7280_apps_actlr_cfg[] = { + { 0x0800, 0x24e1, PREFETCH_DEFAULT | CMTLB }, + { 0x2000, 0x0163, PREFETCH_DEFAULT | CMTLB }, + { 0x2080, 0x0461, PREFETCH_DEFAULT | CMTLB }, + { 0x2100, 0x0161, PREFETCH_DEFAULT | CMTLB }, + { 0x0900, 0x0407, PREFETCH_SHALLOW | CPRE | CMTLB }, + { 0x2180, 0x0027, PREFETCH_SHALLOW | CPRE | CMTLB }, + { 0x1000, 0x07ff, PREFETCH_DEEP | CPRE | CMTLB }, +}; + +static const struct actlr_config sc7280_gfx_actlr_cfg[] = { + { 0x0000, 0x07ff, PREFETCH_SWITCH_GFX | PREFETCH_DEEP | CPRE | CMTLB }, +}; + +static const struct actlr_variant sc7280_actlr[] = { + { .io_start = 0x15000000, .actlrcfg = sc7280_apps_actlr_cfg, .num_actlrcfg = 7 }, + { .io_start = 0x03da0000, .actlrcfg = sc7280_gfx_actlr_cfg, .num_actlrcfg = 1 }, +}; + static const struct actlr_config sm8550_apps_actlr_cfg[] = { { 0x18a0, 0x0000, PREFETCH_SHALLOW | CPRE | CMTLB }, { 0x18e0, 0x0000, PREFETCH_SHALLOW | CPRE | CMTLB }, @@ -661,6 +680,13 @@ static const struct qcom_smmu_match_data sdm845_smmu_500_data = { /* Also no debug configuration. 
*/ }; +static const struct qcom_smmu_match_data sc7280_smmu_500_impl0_data = { + .impl = &qcom_smmu_500_impl, + .adreno_impl = &qcom_adreno_smmu_500_impl, + .cfg = &qcom_smmu_impl0_cfg, + .actlrvar = sc7280_actlr, + .num_smmu = 2, +}; Just for note , there's a small hiccup here as we have to manually calculate and the number of elements in actlr_config size everytime we add this info for a new target, won't be an issue though but just a hindrance to automation (?) >> Posted a sample below. >> >>> >>> [declarations] >>> const struct actlr_variant *actlrvar = qsmmu->data->actlrvar; >>> int i; >>> >>> [rest of the functions] >>> >>> if (!actlrvar) >>> return 0; >>> > for (i = 0; i < actrlvar->num_actrlcfgs; i++) { >>> if (actlrvar[i].io_start == smmu->ioaddr) { >>> qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >>> break; >>> } >>> } >>> > Saving both on .TEXT size and indentation levels :) >>> >> Thanks for this suggestion Konrad, will try to implement this, as it >> would reduce the indent levels to good extent. >> Would something like this be okay? >> >> static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain, >> struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >> const struct actlr_variant *actlrvar; >> int cbndx = smmu_domain->cfg.cbndx; >> + int i; >> >> + actlrvar = qsmmu->data->actlrvar; >> + >> + if (!actlrvar) >> + goto end; >> + >> + for (i = 0; i < qsmmu->data->num_smmu ; i++) { >> + if (actlrvar[i].io_start == smmu->ioaddr) { >> + qcom_smmu_set_actlr(dev, smmu, cbndx, >> + actlrvar[i].actlrcfg); >> + break; >> } >> } >> >> +end: >> smmu_domain->cfg.flush_walk_prefer_tlbiasid = true; > > If you move this assignment before the actlrvar checking (there's no > dependency between them), you will get rid of the goto. > Ack thanks, it's tlb flush operation so won't be an issue moving this before the check. > I also noticed that qcom_smmu_match_data.actlrvar could likely be > const struct actlr_variant * const (const pointer to a const > resource), similarly for actlr_variant.actlrcfg > Sure, make sense, as we aren't aiming to modify these values later on. Would address all these inputs in next version. Thanks & regards, Bibek > Konrad
On Fri, 12 Jan 2024 at 15:07, Bibek Kumar Patro <quic_bibekkum@quicinc.com> wrote: > > > > On 1/12/2024 3:31 PM, Konrad Dybcio wrote: > > > > > > On 1/11/24 19:09, Bibek Kumar Patro wrote: > >> > >> > >> On 1/10/2024 11:26 PM, Konrad Dybcio wrote: > >>> > >>> > >>> On 1/10/24 13:55, Bibek Kumar Patro wrote: > >>>> > >>>> > >>>> On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: > >>>>> > >>>>> > >>>>> On 1/10/2024 9:36 AM, Pavan Kondeti wrote: > >>>> > >>>> [...] > >>>> > >>>>>>> @@ -274,6 +321,21 @@ static const struct of_device_id > >>>>>>> qcom_smmu_client_of_match[] __maybe_unused = { > >>>>>>> static int qcom_smmu_init_context(struct arm_smmu_domain > >>>>>>> *smmu_domain, > >>>>>>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) > >>>>>>> { > >>>>>>> + struct arm_smmu_device *smmu = smmu_domain->smmu; > >>>>>>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); > >>>>>>> + const struct actlr_variant *actlrvar; > >>>>>>> + int cbndx = smmu_domain->cfg.cbndx; > >>>>>>> + > >>>>>>> + if (qsmmu->data->actlrvar) { > >>>>>>> + actlrvar = qsmmu->data->actlrvar; > >>>>>>> + for (; actlrvar->io_start; actlrvar++) { > >>>>>>> + if (actlrvar->io_start == smmu->ioaddr) { > >>>>>>> + qcom_smmu_set_actlr(dev, smmu, cbndx, > >>>>>>> actlrvar->actlrcfg); > >>>>>>> + break; > >>>>>>> + } > >>>>>>> + } > >>>>>>> + } > >>>>>>> + > >>>>>> > >>>>>> This block and the one in qcom_adreno_smmu_init_context() are exactly > >>>>>> the same. Possible to do some refactoring? > >>>>>> > >>>>> > >>>>> I will check if this repeated blocks can be accomodated this into > >>>>> qcom_smmu_set_actlr function if that would be fine. > >>>>> > >>>> > >>>> Also adding to this, this might increase the number of indentation > >>>> inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this > >>>> be an issue? > >>> > >>> By the way, we can refactor this: > >>> > >>> if (qsmmu->data->actlrvar) { > >>> actlrvar = qsmmu->data->actlrvar; > >>> for (; actlrvar->io_start; actlrvar++) { > >>> if (actlrvar->io_start == smmu->ioaddr) { > >>> qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); > >>> break; > >>> } > >>> } > >>> } > >>> > >>> into > >>> > >>> // add const u8 num_actlrcfgs to struct actrl_variant to > >>> // save on sentinel space: > >>> // sizeof(u8) < sizeof(ptr) + sizeof(resource_size_t) > >>> > >> > >> Git it, Would it be better to add this in struct qcom_smmu_match_data ? > > > > Yes, right. > > > > Actually, I noticed now, we can do both the actlr_config (num_actlrcfg > is used) and actlr_var (num_smmu is used) in the similar by storing the > number of elements in each of them. 
> something like this: > > +static const struct actlr_config sc7280_apps_actlr_cfg[] = { > + { 0x0800, 0x24e1, PREFETCH_DEFAULT | CMTLB }, > + { 0x2000, 0x0163, PREFETCH_DEFAULT | CMTLB }, > + { 0x2080, 0x0461, PREFETCH_DEFAULT | CMTLB }, > + { 0x2100, 0x0161, PREFETCH_DEFAULT | CMTLB }, > + { 0x0900, 0x0407, PREFETCH_SHALLOW | CPRE | CMTLB }, > + { 0x2180, 0x0027, PREFETCH_SHALLOW | CPRE | CMTLB }, > + { 0x1000, 0x07ff, PREFETCH_DEEP | CPRE | CMTLB }, > +}; > + > +static const struct actlr_config sc7280_gfx_actlr_cfg[] = { > + { 0x0000, 0x07ff, PREFETCH_SWITCH_GFX | PREFETCH_DEEP | CPRE | CMTLB }, > +}; > + > +static const struct actlr_variant sc7280_actlr[] = { > + { .io_start = 0x15000000, .actlrcfg = sc7280_apps_actlr_cfg, > .num_actlrcfg = 7 }, > + { .io_start = 0x03da0000, .actlrcfg = sc7280_gfx_actlr_cfg, > .num_actlrcfg = 1 }, > +}; > + > static const struct actlr_config sm8550_apps_actlr_cfg[] = { > { 0x18a0, 0x0000, PREFETCH_SHALLOW | CPRE | CMTLB }, > { 0x18e0, 0x0000, PREFETCH_SHALLOW | CPRE | CMTLB }, > @@ -661,6 +680,13 @@ static const struct qcom_smmu_match_data > sdm845_smmu_500_data = { > /* Also no debug configuration. */ > }; > > +static const struct qcom_smmu_match_data sc7280_smmu_500_impl0_data = { > + .impl = &qcom_smmu_500_impl, > + .adreno_impl = &qcom_adreno_smmu_500_impl, > + .cfg = &qcom_smmu_impl0_cfg, > + .actlrvar = sc7280_actlr, > + .num_smmu = 2, > +}; > > Just for note , there's a small hiccup here as we have to manually > calculate and the number of elements in actlr_config size everytime we > add this info for a new target, won't be an issue though but just a > hindrance to automation (?) Just use ARRAY_SIZE(sc7280_actlr).
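As an illustration of that suggestion, the sc7280 sample above could then be wired up without any manual counting. This is a sketch that assumes the num_actlrcfg field discussed earlier in the thread:

static const struct actlr_variant sc7280_actlr[] = {
	{
		.io_start = 0x15000000,
		.actlrcfg = sc7280_apps_actlr_cfg,
		.num_actlrcfg = ARRAY_SIZE(sc7280_apps_actlr_cfg),
	}, {
		.io_start = 0x03da0000,
		.actlrcfg = sc7280_gfx_actlr_cfg,
		.num_actlrcfg = ARRAY_SIZE(sc7280_gfx_actlr_cfg),
	},
};

static const struct qcom_smmu_match_data sc7280_smmu_500_impl0_data = {
	.impl = &qcom_smmu_500_impl,
	.adreno_impl = &qcom_adreno_smmu_500_impl,
	.cfg = &qcom_smmu_impl0_cfg,
	.actlrvar = sc7280_actlr,
	.num_smmu = ARRAY_SIZE(sc7280_actlr),
};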
On 1/12/2024 6:44 PM, Dmitry Baryshkov wrote: > On Fri, 12 Jan 2024 at 15:07, Bibek Kumar Patro > <quic_bibekkum@quicinc.com> wrote: >> >> >> >> On 1/12/2024 3:31 PM, Konrad Dybcio wrote: >>> >>> >>> On 1/11/24 19:09, Bibek Kumar Patro wrote: >>>> >>>> >>>> On 1/10/2024 11:26 PM, Konrad Dybcio wrote: >>>>> >>>>> >>>>> On 1/10/24 13:55, Bibek Kumar Patro wrote: >>>>>> >>>>>> >>>>>> On 1/10/2024 4:46 PM, Bibek Kumar Patro wrote: >>>>>>> >>>>>>> >>>>>>> On 1/10/2024 9:36 AM, Pavan Kondeti wrote: >>>>>> >>>>>> [...] >>>>>> >>>>>>>>> @@ -274,6 +321,21 @@ static const struct of_device_id >>>>>>>>> qcom_smmu_client_of_match[] __maybe_unused = { >>>>>>>>> static int qcom_smmu_init_context(struct arm_smmu_domain >>>>>>>>> *smmu_domain, >>>>>>>>> struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) >>>>>>>>> { >>>>>>>>> + struct arm_smmu_device *smmu = smmu_domain->smmu; >>>>>>>>> + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); >>>>>>>>> + const struct actlr_variant *actlrvar; >>>>>>>>> + int cbndx = smmu_domain->cfg.cbndx; >>>>>>>>> + >>>>>>>>> + if (qsmmu->data->actlrvar) { >>>>>>>>> + actlrvar = qsmmu->data->actlrvar; >>>>>>>>> + for (; actlrvar->io_start; actlrvar++) { >>>>>>>>> + if (actlrvar->io_start == smmu->ioaddr) { >>>>>>>>> + qcom_smmu_set_actlr(dev, smmu, cbndx, >>>>>>>>> actlrvar->actlrcfg); >>>>>>>>> + break; >>>>>>>>> + } >>>>>>>>> + } >>>>>>>>> + } >>>>>>>>> + >>>>>>>> >>>>>>>> This block and the one in qcom_adreno_smmu_init_context() are exactly >>>>>>>> the same. Possible to do some refactoring? >>>>>>>> >>>>>>> >>>>>>> I will check if this repeated blocks can be accomodated this into >>>>>>> qcom_smmu_set_actlr function if that would be fine. >>>>>>> >>>>>> >>>>>> Also adding to this, this might increase the number of indentation >>>>>> inside qcom_smmu_set_actlr as well, to around 5. So wouldn't this >>>>>> be an issue? >>>>> >>>>> By the way, we can refactor this: >>>>> >>>>> if (qsmmu->data->actlrvar) { >>>>> actlrvar = qsmmu->data->actlrvar; >>>>> for (; actlrvar->io_start; actlrvar++) { >>>>> if (actlrvar->io_start == smmu->ioaddr) { >>>>> qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg); >>>>> break; >>>>> } >>>>> } >>>>> } >>>>> >>>>> into >>>>> >>>>> // add const u8 num_actlrcfgs to struct actrl_variant to >>>>> // save on sentinel space: >>>>> // sizeof(u8) < sizeof(ptr) + sizeof(resource_size_t) >>>>> >>>> >>>> Git it, Would it be better to add this in struct qcom_smmu_match_data ? >>> >>> Yes, right. >>> >> >> Actually, I noticed now, we can do both the actlr_config (num_actlrcfg >> is used) and actlr_var (num_smmu is used) in the similar by storing the >> number of elements in each of them. 
>> something like this: >> >> +static const struct actlr_config sc7280_apps_actlr_cfg[] = { >> + { 0x0800, 0x24e1, PREFETCH_DEFAULT | CMTLB }, >> + { 0x2000, 0x0163, PREFETCH_DEFAULT | CMTLB }, >> + { 0x2080, 0x0461, PREFETCH_DEFAULT | CMTLB }, >> + { 0x2100, 0x0161, PREFETCH_DEFAULT | CMTLB }, >> + { 0x0900, 0x0407, PREFETCH_SHALLOW | CPRE | CMTLB }, >> + { 0x2180, 0x0027, PREFETCH_SHALLOW | CPRE | CMTLB }, >> + { 0x1000, 0x07ff, PREFETCH_DEEP | CPRE | CMTLB }, >> +}; >> + >> +static const struct actlr_config sc7280_gfx_actlr_cfg[] = { >> + { 0x0000, 0x07ff, PREFETCH_SWITCH_GFX | PREFETCH_DEEP | CPRE | CMTLB }, >> +}; >> + >> +static const struct actlr_variant sc7280_actlr[] = { >> + { .io_start = 0x15000000, .actlrcfg = sc7280_apps_actlr_cfg, >> .num_actlrcfg = 7 }, >> + { .io_start = 0x03da0000, .actlrcfg = sc7280_gfx_actlr_cfg, >> .num_actlrcfg = 1 }, >> +}; >> + >> static const struct actlr_config sm8550_apps_actlr_cfg[] = { >> { 0x18a0, 0x0000, PREFETCH_SHALLOW | CPRE | CMTLB }, >> { 0x18e0, 0x0000, PREFETCH_SHALLOW | CPRE | CMTLB }, >> @@ -661,6 +680,13 @@ static const struct qcom_smmu_match_data >> sdm845_smmu_500_data = { >> /* Also no debug configuration. */ >> }; >> >> +static const struct qcom_smmu_match_data sc7280_smmu_500_impl0_data = { >> + .impl = &qcom_smmu_500_impl, >> + .adreno_impl = &qcom_adreno_smmu_500_impl, >> + .cfg = &qcom_smmu_impl0_cfg, >> + .actlrvar = sc7280_actlr, >> + .num_smmu = 2, >> +}; >> >> Just for note , there's a small hiccup here as we have to manually >> calculate and the number of elements in actlr_config size everytime we >> add this info for a new target, won't be an issue though but just a >> hindrance to automation (?) > > Just use ARRAY_SIZE(sc7280_actlr). > Noted, ARRAY_SIZE makes sense now for this new io_address based matching. Thanks & regards, Bibek >
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 20c9836d859b..48586952fae4 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -24,6 +24,12 @@
 #define CPRE (1 << 1)
 #define CMTLB (1 << 0)
 
+struct actlr_config {
+	u16 sid;
+	u16 mask;
+	u32 actlr;
+};
+
 static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
 {
 	return container_of(smmu, struct qcom_smmu, smmu);
@@ -215,9 +221,40 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
 	return true;
 }
 
+static void qcom_smmu_set_actlr(struct device *dev, struct arm_smmu_device *smmu, int cbndx,
+				const struct actlr_config *actlrcfg)
+{
+	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct arm_smmu_smr *smr;
+	u16 mask;
+	int idx;
+	u16 id;
+	int i;
+	int j;
+
+	for (i = 0; actlrcfg[i].sid || actlrcfg[i].mask || actlrcfg[i].actlr; i++) {
+		id = actlrcfg[i].sid;
+		mask = actlrcfg[i].mask;
+
+		for_each_cfg_sme(cfg, fwspec, j, idx) {
+			smr = &smmu->smrs[idx];
+			if (smr_is_subset(smr, id, mask)) {
+				arm_smmu_cb_write(smmu, cbndx, ARM_SMMU_CB_ACTLR,
+						  actlrcfg[i].actlr);
+				break;
+			}
+		}
+	}
+}
+
 static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
 {
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+	const struct actlr_variant *actlrvar;
+	int cbndx = smmu_domain->cfg.cbndx;
 	struct adreno_smmu_priv *priv;
 
 	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
@@ -248,6 +285,16 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 	priv->set_stall = qcom_adreno_smmu_set_stall;
 	priv->resume_translation = qcom_adreno_smmu_resume_translation;
 
+	if (qsmmu->data->actlrvar) {
+		actlrvar = qsmmu->data->actlrvar;
+		for (; actlrvar->io_start; actlrvar++) {
+			if (actlrvar->io_start == smmu->ioaddr) {
+				qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg);
+				break;
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -274,6 +321,21 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
 static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
 {
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+	const struct actlr_variant *actlrvar;
+	int cbndx = smmu_domain->cfg.cbndx;
+
+	if (qsmmu->data->actlrvar) {
+		actlrvar = qsmmu->data->actlrvar;
+		for (; actlrvar->io_start; actlrvar++) {
+			if (actlrvar->io_start == smmu->ioaddr) {
+				qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar->actlrcfg);
+				break;
+			}
+		}
+	}
+
 	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
 
 	return 0;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index f3b91963e234..29d26dfa2ed9 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _ARM_SMMU_QCOM_H
@@ -24,8 +24,17 @@ struct qcom_smmu_config {
 	const u32 *reg_offset;
 };
 
+struct actlr_config;
+
+struct actlr_variant {
+	const struct actlr_config *actlrcfg;
+	const resource_size_t io_start;
+};
+
 struct qcom_smmu_match_data {
+	const struct actlr_variant *actlrvar;
 	const struct qcom_smmu_config *cfg;
+	const int num_smmu;
 	const struct arm_smmu_impl *impl;
 	const struct arm_smmu_impl *adreno_impl;
 };
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index d6d1a2a55cc0..0c7f700b27dd 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -990,9 +990,10 @@ static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
 	 * expect simply identical entries for this case, but there's
 	 * no harm in accommodating the generalisation.
 	 */
-	if ((mask & smrs[i].mask) == mask &&
-	    !((id ^ smrs[i].id) & ~smrs[i].mask))
+
+	if (smr_is_subset(&smrs[i], id, mask))
 		return i;
+
 	/*
 	 * If the new entry has any other overlap with an existing one,
 	 * though, then there always exists at least one stream ID
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 703fd5817ec1..2e4f65412c6b 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -501,6 +501,11 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
 	writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
 }
 
+static inline bool smr_is_subset(struct arm_smmu_smr *smrs, u16 id, u16 mask)
+{
+	return (mask & smrs->mask) == mask && !((id ^ smrs->id) & ~smrs->mask);
+}
+
 #define ARM_SMMU_GR0		0
 #define ARM_SMMU_GR1		1
 #define ARM_SMMU_CB(s, n)	((s)->numpage + (n))
Currently, in Qualcomm SoCs the default prefetch is set to 1, which allows the TLB to fetch just the next page table. MMU-500 features an implementation-defined ACTLR register, which is used on Qualcomm SoCs to apply a custom prefetch setting, enabling the TLB to prefetch the next set of page tables and thereby allowing faster translations.

The ACTLR value is unique for each SMR (Stream Matching Register) and is stored in a pre-populated table. This value is written to the register during context bank initialisation.

Suggested-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Bibek Kumar Patro <quic_bibekkum@quicinc.com>
---
 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 62 ++++++++++++++++++++++
 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h | 11 +++-
 drivers/iommu/arm/arm-smmu/arm-smmu.c      |  5 +-
 drivers/iommu/arm/arm-smmu/arm-smmu.h      |  5 ++
 4 files changed, 80 insertions(+), 3 deletions(-)

--
2.17.1
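A side note on the smr_is_subset() helper introduced by the patch: it checks that an ACTLR table entry's {sid, mask} pair is covered by an already-programmed stream match register, i.e. every bit the entry leaves variable must also be ignored by the SMR, and the remaining ID bits must agree. The standalone snippet below reproduces the same expression with made-up example values, purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct smr { uint16_t id; uint16_t mask; };

/* Same check as the kernel helper, repeated outside the kernel for illustration. */
static bool smr_is_subset(const struct smr *smr, uint16_t id, uint16_t mask)
{
	return (mask & smr->mask) == mask && !((id ^ smr->id) & ~smr->mask);
}

int main(void)
{
	/* Hypothetical SMR: ID 0x0800 with bits 0 and 5-7 treated as don't-care. */
	struct smr smr = { .id = 0x0800, .mask = 0x00e1 };

	/* Entry 0x0840/0x0040 differs only in bit 6, which the SMR already ignores. */
	printf("%d\n", smr_is_subset(&smr, 0x0840, 0x0040));	/* prints 1 */

	/* Entry 0x0900/0x0000 differs in bit 8, which the SMR does not ignore. */
	printf("%d\n", smr_is_subset(&smr, 0x0900, 0x0000));	/* prints 0 */

	return 0;
}

This is the same matching rule arm_smmu_find_sme() already used; the patch simply moves it into a shared helper so the ACTLR code can reuse it.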