@@ -15,9 +15,7 @@
#define SPX5_CALBITS_PER_PORT 3 /* Bit per port in calendar register */
/* DSM calendar information */
-#define SPX5_DSM_CAL_LEN 64
#define SPX5_DSM_CAL_EMPTY 0xFFFF
-#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
#define SPX5_DSM_CAL_TAXIS 8
#define SPX5_DSM_CAL_BW_LOSS 553
@@ -37,19 +35,6 @@ static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]
{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};
-struct sparx5_calendar_data {
- u32 schedule[SPX5_DSM_CAL_LEN];
- u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 new_slots[SPX5_DSM_CAL_LEN];
- u32 temp_sched[SPX5_DSM_CAL_LEN];
- u32 indices[SPX5_DSM_CAL_LEN];
- u32 short_list[SPX5_DSM_CAL_LEN];
- u32 long_list[SPX5_DSM_CAL_LEN];
-};
-
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
switch (sparx5->target_ct) {
@@ -278,8 +263,8 @@ static u32 sparx5_dsm_cp_cal(u32 *sched)
return SPX5_DSM_CAL_EMPTY;
}
-static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
- struct sparx5_calendar_data *data)
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
{
bool slow_mode;
u32 gcd, idx, sum, min, factor;
@@ -565,6 +550,7 @@ static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
int taxi;
struct sparx5_calendar_data *data;
int err = 0;
@@ -574,7 +560,7 @@ int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
return -ENOMEM;
for (taxi = 0; taxi < SPX5_CONST(n_dsm_cal_taxis); ++taxi) {
- err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+ err = ops->dsm_calendar_calc(sparx5, taxi, data);
if (err) {
dev_err(sparx5->dev, "DSM calendar calculation failed\n");
goto cal_out;
@@ -989,6 +989,7 @@ static const struct sparx5_ops sparx5_ops = {
.get_sdlb_group = &sparx5_get_sdlb_group,
.set_port_mux = &sparx5_port_mux_set,
.ptp_irq_handler = &sparx5_ptp_irq_handler,
+ .dsm_calendar_calc = &sparx5_dsm_calendar_calc,
};
static const struct sparx5_match_data sparx5_desc = {
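
The two hunks above convert the direct call into an indirect one: sparx5_config_dsm_calendar() now reaches the calendar calculation through sparx5->data->ops, and sparx5_ops registers the Sparx5 implementation. The standalone sketch below mirrors that match-data/ops dispatch outside the kernel; every name in it is a simplified stand-in rather than a driver identifier, and the second implementation is purely hypothetical.

#include <stdio.h>

/* Simplified stand-ins for the driver structures; none of these names come
 * from the driver itself.
 */
struct calendar_data {
	int slots;
};

struct chip_ops {
	int (*dsm_calendar_calc)(void *priv, unsigned int taxi,
				 struct calendar_data *data);
};

struct match_data {
	const struct chip_ops *ops;
};

struct chip {
	const struct match_data *data;
};

/* One implementation per supported device family; the second one stands in
 * for a hypothetical future chip variant.
 */
static int sparx5_like_calc(void *priv, unsigned int taxi,
			    struct calendar_data *data)
{
	(void)priv;
	data->slots = 64;
	printf("sparx5-style calendar for taxi %u\n", taxi);
	return 0;
}

static int other_chip_calc(void *priv, unsigned int taxi,
			   struct calendar_data *data)
{
	(void)priv;
	data->slots = 32;
	printf("alternate calendar for taxi %u\n", taxi);
	return 0;
}

static const struct chip_ops sparx5_like_ops = {
	.dsm_calendar_calc = &sparx5_like_calc,
};

static const struct chip_ops other_chip_ops = {
	.dsm_calendar_calc = &other_chip_calc,
};

/* Common code never names a concrete implementation; it only calls through
 * chip->data->ops, mirroring sparx5_config_dsm_calendar() after the patch.
 */
static int config_dsm_calendar(struct chip *chip, unsigned int n_taxis)
{
	struct calendar_data data = { 0 };
	unsigned int taxi;
	int err;

	for (taxi = 0; taxi < n_taxis; taxi++) {
		err = chip->data->ops->dsm_calendar_calc(chip, taxi, &data);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	const struct match_data desc_a = { .ops = &sparx5_like_ops };
	const struct match_data desc_b = { .ops = &other_chip_ops };
	struct chip chip_a = { .data = &desc_a };
	struct chip chip_b = { .data = &desc_b };

	config_dsm_calendar(&chip_a, 2);
	config_dsm_calendar(&chip_b, 2);
	return 0;
}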
@@ -103,8 +103,24 @@ enum sparx5_vlan_port_type {
#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
+#define SPX5_DSM_CAL_LEN 64
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+
struct sparx5;
+struct sparx5_calendar_data {
+ u32 schedule[SPX5_DSM_CAL_LEN];
+ u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 new_slots[SPX5_DSM_CAL_LEN];
+ u32 temp_sched[SPX5_DSM_CAL_LEN];
+ u32 indices[SPX5_DSM_CAL_LEN];
+ u32 short_list[SPX5_DSM_CAL_LEN];
+ u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
/* Frame DMA receive state:
* For each DB, there is a SKB, and the skb data pointer is mapped in
* the DB. Once a frame is received the skb is given to the upper layers
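
The header hunk above moves SPX5_DSM_CAL_LEN, SPX5_DSM_CAL_MAX_DEVS_PER_TAXI and struct sparx5_calendar_data out of sparx5_calendar.c so that any file implementing the new dsm_calendar_calc op can see the layout. The structure is sizeable, which is presumably why sparx5_config_dsm_calendar() allocates it from the heap rather than the stack (its -ENOMEM path shows in an earlier hunk). A quick size check, assuming 4-byte u32 and no padding:

#include <stdint.h>
#include <stdio.h>

#define SPX5_DSM_CAL_LEN 64
#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13

/* Same layout as the struct added to the header above, with u32 spelled as
 * uint32_t so it builds outside the kernel.
 */
struct sparx5_calendar_data {
	uint32_t schedule[SPX5_DSM_CAL_LEN];
	uint32_t avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	uint32_t taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	uint32_t taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	uint32_t dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
	uint32_t new_slots[SPX5_DSM_CAL_LEN];
	uint32_t temp_sched[SPX5_DSM_CAL_LEN];
	uint32_t indices[SPX5_DSM_CAL_LEN];
	uint32_t short_list[SPX5_DSM_CAL_LEN];
	uint32_t long_list[SPX5_DSM_CAL_LEN];
};

int main(void)
{
	/* 6 * 64 + 4 * 13 = 436 entries of 4 bytes each = 1744 bytes */
	printf("calendar data: %zu bytes\n",
	       sizeof(struct sparx5_calendar_data));
	return 0;
}

On a typical build with no padding this prints 1744 bytes, comfortably more than one would want on a kernel stack.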
@@ -273,6 +289,8 @@ struct sparx5_ops {
struct sparx5_port_config *conf);
irqreturn_t (*ptp_irq_handler)(int irq, void *args);
+ int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
};
struct sparx5_main_io_resource {
@@ -423,6 +441,9 @@ void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
/* sparx5_calendar.c */
int sparx5_config_auto_calendar(struct sparx5 *sparx5);
int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+
/* sparx5_ethtool.c */
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
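
Finally, the prototype of the now-global sparx5_dsm_calendar_calc() joins the other sparx5_calendar.c declarations, while struct sparx5 itself stays a forward declaration. The toy program below, using simplified stand-in names rather than driver types, illustrates why that split is enough: prototypes get by with the incomplete type, but an implementation of the calendar op needs the full struct sparx5_calendar_data definition that now lives in the shared header.

#include <stdint.h>
#include <stdio.h>

struct chip;			/* opaque: used only through pointers */

struct calendar_data {		/* fully defined: implementers fill it in */
	uint32_t schedule[64];
	uint32_t dev_slots[13];
};

/* The prototype compiles against the incomplete struct chip... */
int calendar_calc(struct chip *chip, uint32_t taxi, struct calendar_data *data);

/* ...but the implementation needs the members of calendar_data, which is why
 * the full definition was moved into the shared header.
 */
int calendar_calc(struct chip *chip, uint32_t taxi, struct calendar_data *data)
{
	(void)chip;
	data->schedule[0] = taxi;
	data->dev_slots[0] = 1;
	return 0;
}

int main(void)
{
	struct calendar_data d = { 0 };

	calendar_calc(NULL, 3, &d);
	printf("slot 0 -> %u, dev 0 -> %u\n",
	       (unsigned int)d.schedule[0], (unsigned int)d.dev_slots[0]);
	return 0;
}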