@@ -7,6 +7,7 @@
* Author: Herve Codina <herve.codina@bootlin.com>
*/
+#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
@@ -32,6 +33,7 @@ struct qmc_hdlc {
struct qmc_hdlc_desc tx_descs[8];
unsigned int tx_out;
struct qmc_hdlc_desc rx_descs[4];
+ u32 slot_map; /* logical E1/T1 slot map (te1_settings format) */
};
static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
@@ -206,6 +208,144 @@ static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
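+/*
+ * Translate a 32-bit logical slot map into QMC Tx/Rx timeslot masks.
+ * Bit i of @slot_map selects the i-th timeslot available on the channel,
+ * so the logical bits are scattered over the available-timeslot positions.
+ */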
+static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
+ u32 slot_map, struct qmc_chan_ts_info *ts_info)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+
+ /* Tx and Rx available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(map, slot_map);
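+ /* Spread the logical map bits over the available timeslot positions */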
+ bitmap_scatter(ts_mask, map, ts_mask_avail, 64);
+
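+ /* Leftover bits mean more slots were requested than are available */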
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots %*pb -> (%*pb, %*pb)\n",
+ 64, map, 64, ts_mask_avail, 64, ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
+ ts_info->rx_ts_mask = ts_info->tx_ts_mask;
+ return 0;
+}
+
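+/*
+ * Reverse translation: compress the QMC Tx/Rx timeslot masks into a 32-bit
+ * logical slot map indexed over the available timeslots.
+ */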
+static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
+ const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+ u32 array32[2];
+
+ /* Tx and Rx masks and available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
+ dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask, ts_info->tx_ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
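+ /* Compress the used timeslot bits down to consecutive logical positions */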
+ bitmap_gather(map, ts_mask, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%*pb, %*pb) -> %*pb\n",
+ 64, ts_mask_avail, 64, ts_mask, 64, map);
+ return -EINVAL;
+ }
+
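+ /* te1_settings carries slot_map as a 32-bit value, reject anything wider */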
+ bitmap_to_arr32(array32, map, 64);
+ if (array32[1]) {
+ dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%*pb, %*pb) -> %*pb\n",
+ 64, ts_mask_avail, 64, ts_mask, 64, map);
+ return -EINVAL;
+ }
+
+ *slot_map = array32[0];
+ return 0;
+}
+
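+/*
+ * Apply a new slot map to the QMC channel. Called from the SIOCWANDEV
+ * handler below, with the interface down.
+ */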
+static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
+{
+ struct qmc_chan_ts_info ts_info;
+ int ret;
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+ ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
+ if (ret)
+ return ret;
+
+ ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+
+ qmc_hdlc->slot_map = te1->slot_map;
+
+ return 0;
+}
+
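+/* SIOCWANDEV handler: report or update the E1/T1 timeslot assignment */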
+static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ te1_settings te1;
+
+ switch (ifs->type) {
+ case IF_GET_IFACE:
+ ifs->type = IF_IFACE_E1;
+ if (ifs->size < sizeof(te1)) {
+ if (!ifs->size)
+ return 0; /* only type requested */
+
+ ifs->size = sizeof(te1); /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ memset(&te1, 0, sizeof(te1));
+
+ /* Update slot_map */
+ te1.slot_map = qmc_hdlc->slot_map;
+
+ if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_E1:
+ case IF_IFACE_T1:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
+ return -EFAULT;
+
+ return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);
+
+ default:
+ return hdlc_ioctl(netdev, ifs);
+ }
+}
+
static int qmc_hdlc_open(struct net_device *netdev)
{
struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
@@ -332,13 +472,14 @@ static const struct net_device_ops qmc_hdlc_netdev_ops = {
.ndo_open = qmc_hdlc_open,
.ndo_stop = qmc_hdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
+ .ndo_siocwandev = qmc_hdlc_ioctl,
};
static int qmc_hdlc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct qmc_chan_ts_info ts_info;
struct qmc_hdlc *qmc_hdlc;
struct qmc_chan_info info;
hdlc_device *hdlc;
int ret;
@@ -368,6 +509,15 @@ static int qmc_hdlc_probe(struct platform_device *pdev)
return -EINVAL;
}
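+ /* Seed slot_map from the timeslots currently assigned to the QMC channel */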
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+ ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
+ if (ret)
+ return ret;
+
qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
if (!qmc_hdlc->netdev) {
dev_err(qmc_hdlc->dev, "failed to alloc hdlc dev\n");