@@ -448,6 +448,7 @@ enum {
HCI_WIDEBAND_SPEECH_ENABLED,
HCI_EVENT_FILTER_CONFIGURED,
HCI_PA_SYNC,
+ HCI_SCO_FLOWCTL,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
@@ -1544,6 +1545,11 @@ struct hci_rp_read_tx_power {
__s8 tx_power;
} __packed;
+#define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f
+struct hci_cp_write_sync_flowctl {
+ __u8 enable;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
struct hci_rp_read_page_scan_type {
__u8 status;
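Note: the opcode value follows the standard OGF/OCF packing; Write
Synchronous Flow Control Enable is a Controller & Baseband command, so
OGF 0x03 shifted left by 10 and OR'd with OCF 0x002f gives 0x0c2f. An
illustrative compile-time check, using the existing hci_opcode_pack()
helper from hci.h, not part of the patch:

	/* (0x03 << 10) | 0x002f == 0x0c2f */
	static_assert(hci_opcode_pack(0x03, 0x002f) == HCI_OP_WRITE_SYNC_FLOWCTL);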
@@ -1857,6 +1857,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO)
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
@@ -185,3 +185,5 @@ int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
struct hci_conn_params *params);
+
+int hci_sco_flowctl_test_sync(struct hci_dev *hdev, struct hci_conn *conn);
@@ -3548,51 +3548,52 @@ static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
}
/* Schedule SCO */
-static void hci_sched_sco(struct hci_dev *hdev)
+static void hci_sched_sco_type(struct hci_dev *hdev, __u8 type)
{
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
+ unsigned int pkts;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "type %u", type);
- if (!hci_conn_num(hdev, SCO_LINK))
+ if (!hci_conn_num(hdev, type))
return;
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
+ /* Use sco_pkts if flow control has not been enabled yet, which
+ * limits the number of buffers that can be sent in a row.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
+ pkts = hdev->sco_pkts;
+ else
+ pkts = hdev->sco_cnt;
+
+ if (!pkts)
+ return;
+
+ while (pkts && (conn = hci_low_sent(hdev, type, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(hdev, skb);
+ pkts--;
+
+ if (hdev->sco_cnt > 0)
+ hdev->sco_cnt--;
+
conn->sent++;
if (conn->sent == ~0)
conn->sent = 0;
}
}
-}
-static void hci_sched_esco(struct hci_dev *hdev)
-{
- struct hci_conn *conn;
- struct sk_buff *skb;
- int quote;
-
- BT_DBG("%s", hdev->name);
-
- if (!hci_conn_num(hdev, ESCO_LINK))
- return;
-
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
- "e))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
- hci_send_frame(hdev, skb);
-
- conn->sent++;
- if (conn->sent == ~0)
- conn->sent = 0;
- }
- }
+ /* Reschedule if all packets were sent and flow control is not
+ * enabled, as there could be more packets queued that could not be
+ * sent, and since no HCI_EV_NUM_COMP_PKTS event will be generated
+ * the reschedule needs to be forced.
+ */
+ if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
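Note: the rewritten scheduler draws from one of two budgets, selected by
HCI_SCO_FLOWCTL. A minimal sketch of that choice as a standalone helper
(sco_tx_budget() is a hypothetical name, purely illustrative; the patch
open-codes the logic above):

	/* Without flow control the controller never returns credits via
	 * HCI_EV_NUM_COMP_PKTS, so each run is capped at sco_pkts and the
	 * scheduler requeues tx_work itself; with flow control, sco_cnt
	 * is a live credit count refilled by that event.
	 */
	static unsigned int sco_tx_budget(struct hci_dev *hdev)
	{
		return hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL) ?
		       hdev->sco_cnt : hdev->sco_pkts;
	}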
@@ -3628,8 +3629,8 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
chan->conn->sent++;
/* Send pending SCO packets right away */
- hci_sched_sco(hdev);
- hci_sched_esco(hdev);
+ hci_sched_sco_type(hdev, SCO_LINK);
+ hci_sched_sco_type(hdev, ESCO_LINK);
}
}
@@ -3684,8 +3685,8 @@ static void hci_sched_le(struct hci_dev *hdev)
chan->conn->sent++;
/* Send pending SCO packets right away */
- hci_sched_sco(hdev);
- hci_sched_esco(hdev);
+ hci_sched_sco_type(hdev, SCO_LINK);
+ hci_sched_sco_type(hdev, ESCO_LINK);
}
}
@@ -3730,8 +3731,8 @@ static void hci_tx_work(struct work_struct *work)
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
/* Schedule queues and send stuff to HCI driver */
- hci_sched_sco(hdev);
- hci_sched_esco(hdev);
+ hci_sched_sco_type(hdev, SCO_LINK);
+ hci_sched_sco_type(hdev, ESCO_LINK);
hci_sched_iso(hdev);
hci_sched_acl(hdev);
hci_sched_le(hdev);
@@ -4052,10 +4053,13 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- err = hci_send_frame(hdev, skb);
- if (err < 0) {
- hci_cmd_sync_cancel_sync(hdev, -err);
- return;
+ if (hci_skb_opcode(skb) != HCI_OP_NOP) {
+ err = hci_send_frame(hdev, skb);
+ if (err < 0) {
+ hci_cmd_sync_cancel_sync(hdev, -err);
+ return;
+ }
+ atomic_dec(&hdev->cmd_cnt);
}
if (hdev->req_status == HCI_REQ_PEND &&
@@ -4063,8 +4067,6 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
}
-
- atomic_dec(&hdev->cmd_cnt);
}
static void hci_cmd_work(struct work_struct *work)
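Note: after this change, an HCI_OP_NOP queued through the cmd_sync
machinery is never written to the controller and no longer consumes a
command credit; it merely parks the request until a matching event (or
the timeout) completes it. That is the behaviour the flow-control probe
below relies on, along these lines (timeout stands in for whatever
deadline the caller picks):

	/* Blocks until HCI_EV_NUM_COMP_PKTS arrives or the timeout
	 * expires; nothing goes on the wire for the NOP itself.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
				       HCI_EV_NUM_COMP_PKTS, timeout, NULL);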
@@ -4448,6 +4448,7 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
hdev->sco_cnt += count;
if (hdev->sco_cnt > hdev->sco_pkts)
hdev->sco_cnt = hdev->sco_pkts;
+
break;
case ISO_LINK:
@@ -4916,6 +4917,23 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
hci_dev_unlock(hdev);
}
+static int hci_sco_flowctl_test(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn *conn = data;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ err = hci_sco_flowctl_test_sync(hdev, conn);
+ if (err)
+ hci_dev_clear_flag(hdev, HCI_SCO_FLOWCTL);
+
+ hci_connect_cfm(conn, 0x00);
+
+ return err;
+}
+
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -5021,7 +5039,14 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
}
}
- hci_connect_cfm(conn, status);
+ /* If SCO flow control is enabled, attempt to generate an
+ * HCI_EV_NUM_COMP_PKTS event by sending an empty packet.
+ */
+ if (!status && hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
+ hci_cmd_sync_queue(hdev, hci_sco_flowctl_test, conn, NULL);
+ else
+ hci_connect_cfm(conn, status);
+
if (status)
hci_conn_del(conn);
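Note: when the probe is queued, hci_connect_cfm() is deferred into
hci_sco_flowctl_test(), which runs later on the cmd_sync workqueue; that
is why the callback revalidates the connection before touching it. The
queue/revalidate pattern, reduced to an illustrative skeleton (work_cb
is a made-up name):

	static int work_cb(struct hci_dev *hdev, void *data)
	{
		struct hci_conn *conn = data;

		/* conn may have been freed between queueing and execution */
		if (!hci_conn_valid(hdev, conn))
			return -ECANCELED;

		return 0;
	}

	hci_cmd_sync_queue(hdev, work_cb, conn, NULL);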
@@ -3769,6 +3769,27 @@ static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
sizeof(param), &param, HCI_CMD_TIMEOUT);
}
+/* Enable SCO flow control if supported */
+static int hci_write_sync_flowctl_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_sync_flowctl cp;
+ int err;
+
+ /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */
+ if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = 0x01;
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (!err)
+ hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL);
+
+ return err;
+}
+
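Note: the feature gate matches the Supported Commands bitmap from the
Core spec, where Write Synchronous Flow Control Enable is octet 10
bit 4 (hence commands[10] & BIT(4)). As an illustrative helper, not
part of the patch:

	static bool sync_flowctl_supported(struct hci_dev *hdev)
	{
		return lmp_sco_capable(hdev) &&
		       (hdev->commands[10] & BIT(4));
	}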
/* BR Controller init stage 2 command sequence */
static const struct hci_init_stage br_init2[] = {
/* HCI_OP_READ_BUFFER_SIZE */
@@ -3787,6 +3808,8 @@ static const struct hci_init_stage br_init2[] = {
HCI_INIT(hci_clear_event_filter_sync),
/* HCI_OP_WRITE_CA_TIMEOUT */
HCI_INIT(hci_write_ca_timeout_sync),
+ /* HCI_OP_WRITE_SYNC_FLOWCTL */
+ HCI_INIT(hci_write_sync_flowctl_sync),
{}
};
@@ -6871,3 +6894,21 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
+
+int hci_sco_flowctl_test_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct sk_buff *skb;
+
+ /* Allocate an empty skb to trigger testing of flow control */
+ skb = bt_skb_alloc(0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Queue the packet on the connection; tx_work pushes it out */
+ hci_send_sco(conn, skb);
+
+ /* Wait for HCI_EV_NUM_COMP_PKTS */
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+ HCI_EV_NUM_COMP_PKTS,
+ conn->disc_timeout, NULL);
+}
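Note: the test frame is empty only at the payload level; hci_send_sco()
pushes the standard 3-byte SCO data header onto the skb (bt_skb_alloc()
reserves the headroom for it), so the controller sees a valid
zero-length SCO packet for the handle and, with flow control on, must
report it via HCI_EV_NUM_COMP_PKTS. For reference, the header it
prepends (struct hci_sco_hdr from include/net/bluetooth/hci.h):

	struct hci_sco_hdr {
		__le16	handle;	/* connection handle + packet flags */
		__u8	dlen;	/* payload length: 0 for the test frame */
	} __packed;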