diff mbox series

irqchip: mips-gic: Handle case with cluster without CPU cores

Message ID 20241025-no-cpu-cluster-support-v1-1-5e81fcf9f25c@bootlin.com (mailing list archive)
State Superseded
Headers show
Series irqchip: mips-gic: Handle case with cluster without CPU cores | expand

Commit Message

Gregory CLEMENT Oct. 25, 2024, 3:46 p.m. UTC
It is possible to have no CPU cores in a cluster; in such cases, it is
not possible to access the GIC, and any indirect access leads to an
exception. This patch dynamically skips the indirect access in such
situations.

Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
---
This patch is a follow-up of the series "MIPS: Support I6500
multi-cluster configuration"
https://lore.kernel.org/lkml/20241019071037.145314-1-arikalo@gmail.com/#t
---
 drivers/irqchip/irq-mips-gic.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)


---
base-commit: 10e44701486e25d630d714ace2b0c6d9a178b331
change-id: 20241025-no-cpu-cluster-support-1745e8abd7d1

Best regards,

Comments

Jiaxun Yang Oct. 25, 2024, 4:43 p.m. UTC | #1
在2024年10月25日十月 下午4:46,Gregory CLEMENT写道:
> It is possible to have no CPU cores in a cluster; in such cases, it is
> not possible to access the GIC, and any indirect access leads to an
> exception. This patch dynamically skips the indirect access in such
> situations.

Hi Gregory,

I'm a little bit confused here, as I have never seen such a weird configuration.

Is second cluster IOCU only?

Thanks
- Jiaxun

>
> Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
> ---
> This patch is a follow-up of the series "MIPS: Support I6500
> multi-cluster configuration"
> https://lore.kernel.org/lkml/20241019071037.145314-1-arikalo@gmail.com/#t
> ---
>  drivers/irqchip/irq-mips-gic.c | 20 ++++++++++++++++----
>  1 file changed, 16 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
> index f42f69bbd6fb1..bca8053864b2c 100644
> --- a/drivers/irqchip/irq-mips-gic.c
> +++ b/drivers/irqchip/irq-mips-gic.c
> @@ -141,7 +141,8 @@ static bool gic_irq_lock_cluster(struct irq_data *d)
>  	cl = cpu_cluster(&cpu_data[cpu]);
>  	if (cl == cpu_cluster(&current_cpu_data))
>  		return false;
> -
> +	if (mips_cps_numcores(cl) == 0)
> +		return false;
>  	mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
>  	return true;
>  }
> @@ -507,6 +508,9 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
>  	struct gic_all_vpes_chip_data *cd;
>  	int intr, cpu;
> 
> +	if (!mips_cps_multicluster_cpus())
> +		return;
> +
>  	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
>  	cd = irq_data_get_irq_chip_data(d);
>  	cd->mask = false;
> @@ -520,6 +524,9 @@ static void gic_unmask_local_irq_all_vpes(struct 
> irq_data *d)
>  	struct gic_all_vpes_chip_data *cd;
>  	int intr, cpu;
> 
> +	if (!mips_cps_multicluster_cpus())
> +		return;
> +
>  	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
>  	cd = irq_data_get_irq_chip_data(d);
>  	cd->mask = true;
> @@ -687,8 +694,10 @@ static int gic_irq_domain_map(struct irq_domain 
> *d, unsigned int virq,
>  	if (!gic_local_irq_is_routable(intr))
>  		return -EPERM;
> 
> -	for_each_online_cpu_gic(cpu, &gic_lock)
> -		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
> +	if (mips_cps_multicluster_cpus()) {
> +		for_each_online_cpu_gic(cpu, &gic_lock)
> +			write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
> +	}
> 
>  	return 0;
>  }
> @@ -982,7 +991,7 @@ static int __init gic_of_init(struct device_node *node,
>  				change_gic_trig(i, GIC_TRIG_LEVEL);
>  				write_gic_rmask(i);
>  			}
> -		} else {
> +		} else if (mips_cps_numcores(cl) != 0) {
>  			mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
>  			for (i = 0; i < gic_shared_intrs; i++) {
>  				change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
> @@ -990,6 +999,9 @@ static int __init gic_of_init(struct device_node *node,
>  				write_gic_redir_rmask(i);
>  			}
>  			mips_cm_unlock_other();
> +
> +		} else {
> +			pr_warn("No CPU cores on the cluster %d skip it\n", cl);
>  		}
>  	}
> 
>
> ---
> base-commit: 10e44701486e25d630d714ace2b0c6d9a178b331
> change-id: 20241025-no-cpu-cluster-support-1745e8abd7d1
>
> Best regards,
> -- 
> Gregory CLEMENT <gregory.clement@bootlin.com>
Gregory CLEMENT Oct. 25, 2024, 8:40 p.m. UTC | #2
Hi Jiaxun,

> 在2024年10月25日十月 下午4:46,Gregory CLEMENT写道:
>> It is possible to have no CPU cores in a cluster; in such cases, it is
>> not possible to access the GIC, and any indirect access leads to an
>> exception. This patch dynamically skips the indirect access in such
>> situations.
>
> Hi Gregory,
>
> I'm a little bit confused here, as I have never seen such a weird configuration.
>
> Is second cluster IOCU only?

Yes, indeed: in EyeQ5, the second cluster is the place for many vision
accelerators that benefit from the L2 cache and from the coherency
unit.

Gregory

>
> Thanks
> - Jiaxun
>
>>
>> Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
>> ---
>> This patch is a follow-up of the series "MIPS: Support I6500
>> multi-cluster configuration"
>> https://lore.kernel.org/lkml/20241019071037.145314-1-arikalo@gmail.com/#t
>> ---
>>  drivers/irqchip/irq-mips-gic.c | 20 ++++++++++++++++----
>>  1 file changed, 16 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
>> index f42f69bbd6fb1..bca8053864b2c 100644
>> --- a/drivers/irqchip/irq-mips-gic.c
>> +++ b/drivers/irqchip/irq-mips-gic.c
>> @@ -141,7 +141,8 @@ static bool gic_irq_lock_cluster(struct irq_data *d)
>>  	cl = cpu_cluster(&cpu_data[cpu]);
>>  	if (cl == cpu_cluster(&current_cpu_data))
>>  		return false;
>> -
>> +	if (mips_cps_numcores(cl) == 0)
>> +		return false;
>>  	mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
>>  	return true;
>>  }
>> @@ -507,6 +508,9 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
>>  	struct gic_all_vpes_chip_data *cd;
>>  	int intr, cpu;
>> 
>> +	if (!mips_cps_multicluster_cpus())
>> +		return;
>> +
>>  	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
>>  	cd = irq_data_get_irq_chip_data(d);
>>  	cd->mask = false;
>> @@ -520,6 +524,9 @@ static void gic_unmask_local_irq_all_vpes(struct 
>> irq_data *d)
>>  	struct gic_all_vpes_chip_data *cd;
>>  	int intr, cpu;
>> 
>> +	if (!mips_cps_multicluster_cpus())
>> +		return;
>> +
>>  	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
>>  	cd = irq_data_get_irq_chip_data(d);
>>  	cd->mask = true;
>> @@ -687,8 +694,10 @@ static int gic_irq_domain_map(struct irq_domain 
>> *d, unsigned int virq,
>>  	if (!gic_local_irq_is_routable(intr))
>>  		return -EPERM;
>> 
>> -	for_each_online_cpu_gic(cpu, &gic_lock)
>> -		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
>> +	if (mips_cps_multicluster_cpus()) {
>> +		for_each_online_cpu_gic(cpu, &gic_lock)
>> +			write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
>> +	}
>> 
>>  	return 0;
>>  }
>> @@ -982,7 +991,7 @@ static int __init gic_of_init(struct device_node *node,
>>  				change_gic_trig(i, GIC_TRIG_LEVEL);
>>  				write_gic_rmask(i);
>>  			}
>> -		} else {
>> +		} else if (mips_cps_numcores(cl) != 0) {
>>  			mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
>>  			for (i = 0; i < gic_shared_intrs; i++) {
>>  				change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
>> @@ -990,6 +999,9 @@ static int __init gic_of_init(struct device_node *node,
>>  				write_gic_redir_rmask(i);
>>  			}
>>  			mips_cm_unlock_other();
>> +
>> +		} else {
>> +			pr_warn("No CPU cores on the cluster %d skip it\n", cl);
>>  		}
>>  	}
>> 
>>
>> ---
>> base-commit: 10e44701486e25d630d714ace2b0c6d9a178b331
>> change-id: 20241025-no-cpu-cluster-support-1745e8abd7d1
>>
>> Best regards,
>> -- 
>> Gregory CLEMENT <gregory.clement@bootlin.com>
>
> -- 
> - Jiaxun
Jiaxun Yang Oct. 25, 2024, 8:47 p.m. UTC | #3
在2024年10月25日十月 下午9:40,Gregory CLEMENT写道:
> Hi Jiaxun,
>
[...]
>> Is second cluster IOCU only?
>
> Yes, indeed: in EyeQ5, the second cluster is the place for many vision
> accelerators that benefit from the L2 cache and from the coherency
> unit.

It makes sense to me then — thanks for the information. I just checked an
IOCU-only MIPS release and indeed this special handling is necessary.

Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>

I think some initialisation to IOCU and L2C in second cluster is also
necessary, but I guess those are already done by firmware in your case?

Thanks
Thomas Gleixner Oct. 28, 2024, 1:24 p.m. UTC | #4
On Fri, Oct 25 2024 at 17:46, Gregory CLEMENT wrote:
> It is possible to have no CPU cores in a cluster; in such cases, it is
> not possible to access the GIC, and any indirect access leads to an
> exception. This patch dynamically skips the indirect access in such
> situations.
>
> Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
> ---
> This patch is a follow-up of the series "MIPS: Support I6500
> multi-cluster configuration"
> https://lore.kernel.org/lkml/20241019071037.145314-1-arikalo@gmail.com/#t

And should be integrated into the next iteration of that series.

Thanks,

        tglx
diff mbox series

Patch

diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f42f69bbd6fb1..bca8053864b2c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -141,7 +141,8 @@  static bool gic_irq_lock_cluster(struct irq_data *d)
 	cl = cpu_cluster(&cpu_data[cpu]);
 	if (cl == cpu_cluster(&current_cpu_data))
 		return false;
-
+	if (mips_cps_numcores(cl) == 0)
+		return false;
 	mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
 	return true;
 }
@@ -507,6 +508,9 @@  static void gic_mask_local_irq_all_vpes(struct irq_data *d)
 	struct gic_all_vpes_chip_data *cd;
 	int intr, cpu;
 
+	if (!mips_cps_multicluster_cpus())
+		return;
+
 	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 	cd = irq_data_get_irq_chip_data(d);
 	cd->mask = false;
@@ -520,6 +524,9 @@  static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
 	struct gic_all_vpes_chip_data *cd;
 	int intr, cpu;
 
+	if (!mips_cps_multicluster_cpus())
+		return;
+
 	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 	cd = irq_data_get_irq_chip_data(d);
 	cd->mask = true;
@@ -687,8 +694,10 @@  static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	if (!gic_local_irq_is_routable(intr))
 		return -EPERM;
 
-	for_each_online_cpu_gic(cpu, &gic_lock)
-		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+	if (mips_cps_multicluster_cpus()) {
+		for_each_online_cpu_gic(cpu, &gic_lock)
+			write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+	}
 
 	return 0;
 }
@@ -982,7 +991,7 @@  static int __init gic_of_init(struct device_node *node,
 				change_gic_trig(i, GIC_TRIG_LEVEL);
 				write_gic_rmask(i);
 			}
-		} else {
+		} else if (mips_cps_numcores(cl) != 0) {
 			mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
 			for (i = 0; i < gic_shared_intrs; i++) {
 				change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
@@ -990,6 +999,9 @@  static int __init gic_of_init(struct device_node *node,
 				write_gic_redir_rmask(i);
 			}
 			mips_cm_unlock_other();
+
+		} else {
+			pr_warn("No CPU cores on the cluster %d skip it\n", cl);
 		}
 	}