Message ID | 20241113154616.2493297-10-milena.olech@intel.com (mailing list archive)
---|---
State | Awaiting Upstream
Delegated to | Netdev Maintainers
Series | initial PTP support
Milena Olech wrote:
> Add Rx timestamp function when the Rx timestamp value is read directly
> from the Rx descriptor. Add supported Rx timestamp modes.
>
> Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> Signed-off-by: Milena Olech <milena.olech@intel.com>
> ---
>  drivers/net/ethernet/intel/idpf/idpf_ptp.c  | 74 ++++++++++++++++++++-
>  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++
>  drivers/net/ethernet/intel/idpf/idpf_txrx.h |  7 +-
>  3 files changed, 109 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> index f34642d10768..f9f7613f2a6d 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> @@ -317,12 +317,41 @@ static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
>  	return 0;
>  }
>
> +/**
> + * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
> + *				     queue group.

Why does each receive group have a separate cached value?
They're all caches of the same device clock.

> + * @grp: receive queue group in which Rx timestamp is enabled
> + * @split: Indicates whether the queue model is split or single queue
> + * @systime: Cached system time
> + */
> +static void
> +idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
> +				u64 systime)
> +{
> +	struct idpf_rx_queue *rxq;
> +	u16 i;
> +
> +	if (!split) {
> +		for (i = 0; i < grp->singleq.num_rxq; i++) {
> +			rxq = grp->singleq.rxqs[i];
> +			if (rxq)
> +				WRITE_ONCE(rxq->cached_phc_time, systime);
> +		}
> +	} else {
> +		for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
> +			rxq = &grp->splitq.rxq_sets[i]->rxq;
> +			if (rxq)
> +				WRITE_ONCE(rxq->cached_phc_time, systime);
> +		}
> +	}
> +}
> +
> +/**
> + * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
> + * @vport: Virtual port structure
> + * @rx_filter: bool value for whether timestamps are enabled or disabled
> + */
> +static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
> +{
> +	vport->tstamp_config.rx_filter = rx_filter;
> +
> +	if (rx_filter == HWTSTAMP_FILTER_NONE)
> +		return;

Should this clear the bit if it was previously set, instead of
returning immediately?

> +
> +	for (u16 i = 0; i < vport->num_rxq_grp; i++) {
> +		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
> +		u16 j;
> +
> +		if (idpf_is_queue_model_split(vport->rxq_model)) {
> +			for (j = 0; j < grp->singleq.num_rxq; j++)
> +				idpf_queue_set(PTP, grp->singleq.rxqs[j]);
> +		} else {
> +			for (j = 0; j < grp->splitq.num_rxq_sets; j++)
> +				idpf_queue_set(PTP,
> +					       &grp->splitq.rxq_sets[j]->rxq);
> +		}
> +	}
> +}

> +static void
> +idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
> +		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
> +		 struct sk_buff *skb)
> +{
> +	u64 cached_time, ts_ns;
> +	u32 ts_high;
> +
> +	if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> +		return;
> +
> +	cached_time = READ_ONCE(rxq->cached_phc_time);
> +
> +	ts_high = le32_to_cpu(rx_desc->ts_high);
> +	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
> +
> +	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
> +		.hwtstamp = ns_to_ktime(ts_ns),
> +	};

Simpler: skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
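A sketch of the toggle-style alternative implied by the second question
above. This is illustrative only, not the author's code: it assumes
idpf_queue_clear() as the counterpart of idpf_queue_set(), and it pairs
the split/singleq branches the way idpf_ptp_update_phctime_rxq_grp()
does:

static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
{
	/* Sketch: treat rx_filter as an on/off toggle so a previously
	 * set per-queue PTP flag is cleared when timestamping is
	 * disabled, instead of returning early.
	 */
	bool enable = rx_filter != HWTSTAMP_FILTER_NONE;

	vport->tstamp_config.rx_filter = rx_filter;

	for (u16 i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *grp = &vport->rxq_grps[i];

		if (idpf_is_queue_model_split(vport->rxq_model)) {
			for (u16 j = 0; j < grp->splitq.num_rxq_sets; j++) {
				struct idpf_rx_queue *rxq =
					&grp->splitq.rxq_sets[j]->rxq;

				if (enable)
					idpf_queue_set(PTP, rxq);
				else
					idpf_queue_clear(PTP, rxq);
			}
		} else {
			for (u16 j = 0; j < grp->singleq.num_rxq; j++) {
				struct idpf_rx_queue *rxq =
					grp->singleq.rxqs[j];

				if (enable)
					idpf_queue_set(PTP, rxq);
				else
					idpf_queue_clear(PTP, rxq);
			}
		}
	}
}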
On 11/14/2024 9:54 PM, Willem de Bruijn wrote:

> Milena Olech wrote:
> > Add Rx timestamp function when the Rx timestamp value is read directly
> > from the Rx descriptor. Add supported Rx timestamp modes.
> >
> > Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> > Signed-off-by: Milena Olech <milena.olech@intel.com>
> > ---
> >  drivers/net/ethernet/intel/idpf/idpf_ptp.c  | 74 ++++++++++++++++++++-
> >  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++
> >  drivers/net/ethernet/intel/idpf/idpf_txrx.h |  7 +-
> >  3 files changed, 109 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> > index f34642d10768..f9f7613f2a6d 100644
> > --- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> > +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> > @@ -317,12 +317,41 @@ static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
> >  	return 0;
> >  }
> >
> > +/**
> > + * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
> > + *				     queue group.
>
> Why does each receive group have a separate cached value?
> They're all caches of the same device clock.

That's correct - they are all caches of the same PHC. However, I would
like to have an efficient method to access this value in the hotpath,
where I'm extending the Rx timestamp value to 64 bits.

For the same reason I cached the Tx timestamp caps in
idpf_vport_init_fast_path_txqs.

> > + * @grp: receive queue group in which Rx timestamp is enabled
> > + * @split: Indicates whether the queue model is split or single queue
> > + * @systime: Cached system time
> > + */
> > +static void
> > +idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
> > +				u64 systime)
> > +{
> > +	struct idpf_rx_queue *rxq;
> > +	u16 i;
> > +
> > +	if (!split) {
> > +		for (i = 0; i < grp->singleq.num_rxq; i++) {
> > +			rxq = grp->singleq.rxqs[i];
> > +			if (rxq)
> > +				WRITE_ONCE(rxq->cached_phc_time, systime);
> > +		}
> > +	} else {
> > +		for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
> > +			rxq = &grp->splitq.rxq_sets[i]->rxq;
> > +			if (rxq)
> > +				WRITE_ONCE(rxq->cached_phc_time, systime);
> > +		}
> > +	}
> > +}
> > +
> > +/**
> > + * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
> > + * @vport: Virtual port structure
> > + * @rx_filter: bool value for whether timestamps are enabled or disabled
> > + */
> > +static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
> > +{
> > +	vport->tstamp_config.rx_filter = rx_filter;
> > +
> > +	if (rx_filter == HWTSTAMP_FILTER_NONE)
> > +		return;
>
> Should this clear the bit if it was previously set, instead of
> returning immediately?

> > +
> > +	for (u16 i = 0; i < vport->num_rxq_grp; i++) {
> > +		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
> > +		u16 j;
> > +
> > +		if (idpf_is_queue_model_split(vport->rxq_model)) {
> > +			for (j = 0; j < grp->singleq.num_rxq; j++)
> > +				idpf_queue_set(PTP, grp->singleq.rxqs[j]);
> > +		} else {
> > +			for (j = 0; j < grp->splitq.num_rxq_sets; j++)
> > +				idpf_queue_set(PTP,
> > +					       &grp->splitq.rxq_sets[j]->rxq);
> > +		}
> > +	}
> > +}

> > +static void
> > +idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
> > +		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
> > +		 struct sk_buff *skb)
> > +{
> > +	u64 cached_time, ts_ns;
> > +	u32 ts_high;
> > +
> > +	if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> > +		return;
> > +
> > +	cached_time = READ_ONCE(rxq->cached_phc_time);
> > +
> > +	ts_high = le32_to_cpu(rx_desc->ts_high);
> > +	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
> > +
> > +	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
> > +		.hwtstamp = ns_to_ktime(ts_ns),
> > +	};
>
> Simpler: skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);

Thanks,
Milena
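An aside on the final suggestion above: struct skb_shared_hwtstamps in
current kernels holds only the hwtstamp field (in a union with a
driver-private pointer), so the direct member assignment stores the same
bytes as the compound-literal struct assignment in the patch:

	/* equivalent to the compound-literal form, one plain store */
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);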
Olech, Milena wrote:
> On 11/14/2024 9:54 PM, Willem de Bruijn wrote:
>
> > Milena Olech wrote:
> > > Add Rx timestamp function when the Rx timestamp value is read directly
> > > from the Rx descriptor. Add supported Rx timestamp modes.
> > >
> > > Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> > > Signed-off-by: Milena Olech <milena.olech@intel.com>
> > > ---
> > >  drivers/net/ethernet/intel/idpf/idpf_ptp.c  | 74 ++++++++++++++++++++-
> > >  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++
> > >  drivers/net/ethernet/intel/idpf/idpf_txrx.h |  7 +-
> > >  3 files changed, 109 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> > > index f34642d10768..f9f7613f2a6d 100644
> > > --- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> > > +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
> > > @@ -317,12 +317,41 @@ static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
> > >  	return 0;
> > >  }
> > >
> > > +/**
> > > + * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
> > > + *				     queue group.
> >
> > Why does each receive group have a separate cached value?
> > They're all caches of the same device clock.
>
> That's correct - they are all caches of the same PHC. However, I would
> like to have an efficient method to access this value in the hotpath,
> where I'm extending the Rx timestamp value to 64 bits.
>
> For the same reason I cached the Tx timestamp caps in
> idpf_vport_init_fast_path_txqs.

Oh, to avoid reading a cold cacheline in the hot path. Okay, that makes
sense. Please make a note of this in the commit message.
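The hot path that motivates the per-queue cache is the 32-to-64 bit
extension itself. A sketch of the usual technique, modeled on
ice_ptp_extend_32b_ts() in the ice driver (idpf's
idpf_ptp_tstamp_extend_32b_to_64b() may differ in detail): the hardware
delivers only the low 32 bits of the PHC nanosecond counter, and the
cached PHC time supplies the upper bits.

static u64 ptp_extend_32b_to_64b(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = lower_32_bits(cached_phc_time);
	u32 delta = in_tstamp - phc_lo;
	u64 ns;

	if (delta > (U32_MAX / 2)) {
		/* The hardware stamp is slightly older than the cached
		 * time: the subtraction wrapped, so walk backwards.
		 */
		delta = phc_lo - in_tstamp;
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

This also explains the "never more than 2 seconds old" requirement on
the cache in the patch below: a 32-bit nanosecond counter wraps about
every 4.3 seconds, and staying well inside half the wrap period keeps
the disambiguation above unambiguous.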
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index f34642d10768..f9f7613f2a6d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -317,12 +317,41 @@ static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
 	return 0;
 }
 
+/**
+ * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
+ *				     queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled
+ * @split: Indicates whether the queue model is split or single queue
+ * @systime: Cached system time
+ */
+static void
+idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
+				u64 systime)
+{
+	struct idpf_rx_queue *rxq;
+	u16 i;
+
+	if (!split) {
+		for (i = 0; i < grp->singleq.num_rxq; i++) {
+			rxq = grp->singleq.rxqs[i];
+			if (rxq)
+				WRITE_ONCE(rxq->cached_phc_time, systime);
+		}
+	} else {
+		for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
+			rxq = &grp->splitq.rxq_sets[i]->rxq;
+			if (rxq)
+				WRITE_ONCE(rxq->cached_phc_time, systime);
+		}
+	}
+}
+
 /**
  * idpf_ptp_update_cached_phctime - Update the cached PHC time values
  * @adapter: Driver specific private structure
  *
  * This function updates the system time values which are cached in the adapter
- * structure.
+ * structure and the Rx queues.
  *
  * This function must be called periodically to ensure that the cached value
  * is never more than 2 seconds old.
@@ -345,6 +374,21 @@ static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
 	WRITE_ONCE(adapter->ptp->cached_phc_time, systime);
 	WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
 
+	idpf_for_each_vport(adapter, vport) {
+		bool split;
+
+		if (!vport || !vport->rxq_grps)
+			continue;
+
+		split = idpf_is_queue_model_split(vport->rxq_model);
+
+		for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+			struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+			idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
+		}
+	}
+
 	return 0;
 }
 
@@ -605,6 +649,33 @@ int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
 	return 0;
 }
 
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: bool value for whether timestamps are enabled or disabled
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+	vport->tstamp_config.rx_filter = rx_filter;
+
+	if (rx_filter == HWTSTAMP_FILTER_NONE)
+		return;
+
+	for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+		u16 j;
+
+		if (idpf_is_queue_model_split(vport->rxq_model)) {
+			for (j = 0; j < grp->singleq.num_rxq; j++)
+				idpf_queue_set(PTP, grp->singleq.rxqs[j]);
+		} else {
+			for (j = 0; j < grp->splitq.num_rxq_sets; j++)
+				idpf_queue_set(PTP,
+					       &grp->splitq.rxq_sets[j]->rxq);
+		}
+	}
+}
+
 /**
  * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
  * @vport: Virtual port structure
@@ -624,6 +695,7 @@ static int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
 	}
 
 	vport->tstamp_config.tx_type = config->tx_type;
+	idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index b223000735f0..ba8265051ad7 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3168,6 +3168,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
 	return 0;
 }
 
+/**
+ * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
+ * @rxq: pointer to the rx queue that receives the timestamp
+ * @rx_desc: pointer to rx descriptor containing timestamp
+ * @skb: skb to put timestamp in
+ */
+static void
+idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
+		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+		 struct sk_buff *skb)
+{
+	u64 cached_time, ts_ns;
+	u32 ts_high;
+
+	if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+		return;
+
+	cached_time = READ_ONCE(rxq->cached_phc_time);
+
+	ts_high = le32_to_cpu(rx_desc->ts_high);
+	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+		.hwtstamp = ns_to_ktime(ts_ns),
+	};
+}
+
 /**
  * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
  * @rxq: Rx descriptor ring packet is being transacted on
@@ -3193,6 +3220,9 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
 	/* process RSS/hash */
 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
 
+	if (idpf_queue_has(PTP, rxq))
+		idpf_rx_hwtstamp(rxq, rx_desc, skb);
+
 	skb->protocol = eth_type_trans(skb, rxq->netdev);
 
 	if (le16_get_bits(rx_desc->hdrlen_flags,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 2f8f2eab3d09..2c651fb9f96d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -290,6 +290,8 @@ struct idpf_ptype_state {
  * @__IDPF_Q_POLL_MODE: Enable poll mode
  * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
  * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
+ * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
+ *		  queue
  * @__IDPF_Q_FLAGS_NBITS: Must be last
  */
 enum idpf_queue_flags_t {
@@ -300,6 +302,7 @@ enum idpf_queue_flags_t {
 	__IDPF_Q_POLL_MODE,
 	__IDPF_Q_CRC_EN,
 	__IDPF_Q_HSPLIT_EN,
+	__IDPF_Q_PTP,
 	__IDPF_Q_FLAGS_NBITS,
 };
 
@@ -491,6 +494,7 @@ struct idpf_txq_stash {
  * @next_to_alloc: RX buffer to allocate at
  * @skb: Pointer to the skb
  * @truesize: data buffer truesize in singleq
+ * @cached_phc_time: Cached PHC time for the Rx queue
  * @stats_sync: See struct u64_stats_sync
  * @q_stats: See union idpf_rx_queue_stats
  * @q_id: Queue id
@@ -538,6 +542,7 @@ struct idpf_rx_queue {
 	struct sk_buff *skb;
 	u32 truesize;
+	u64 cached_phc_time;
 
 	struct u64_stats_sync stats_sync;
 	struct idpf_rx_queue_stats q_stats;
@@ -557,7 +562,7 @@ struct idpf_rx_queue {
 	__cacheline_group_end_aligned(cold);
 };
 libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
-			    80 + sizeof(struct u64_stats_sync),
+			    88 + sizeof(struct u64_stats_sync),
 			    32);
 
 /**
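For completeness, the consumer side of this patch as seen from
userspace. A minimal sketch with error handling trimmed; the interface
name and the choice of HWTSTAMP_FILTER_ALL are placeholders for whatever
the device actually reports as supported:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static int enable_rx_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = {0};
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The driver's hwtstamp handling ends up in
	 * idpf_ptp_set_timestamp_mode(), which per this patch now also
	 * calls idpf_ptp_set_rx_tstamp() to flag the Rx queues.
	 */
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* Deliver the raw hardware stamp as a SCM_TIMESTAMPING cmsg. */
	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}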