@@ -22,6 +22,15 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+static LIST_HEAD(ath12k_hw_group_list);
+
+/* When multiple device groups are available with different combinations of devices,
+ * it is possible that concurrent access of different device groups (ag) will
+ * happen for operations like find, add, destroy. Hence, protect it with help of
+ * ag_list_lock (ath12k_hw_group's list lock).
+ */
+static DEFINE_MUTEX(ath12k_ag_list_lock);
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -1238,27 +1247,112 @@ static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
&ab->panic_nb);
}
-int ath12k_core_init(struct ath12k_base *ab)
+/* Returns true once every device expected in the group has probed;
+ * only then may the group-wide SoC create proceed.
+ */
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
 {
-	int ret;
+	lockdep_assert_held(&ag->mutex_lock);
-	ret = ath12k_core_soc_create(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to create soc core: %d\n", ret);
-		return ret;
+	return (ag->num_probed == ag->num_devices);
+}
+
+/* Allocate a new device group and link it into ath12k_hw_group_list.
+ *
+ * Must be called with ath12k_ag_list_lock held. Returns NULL when
+ * max_devices exceeds the ab[] capacity or on allocation failure.
+ */
+static struct ath12k_hw_group *
+ath12k_core_hw_group_alloc(u8 id, u8 max_devices)
+{
+	struct ath12k_hw_group *ag;
+
+	lockdep_assert_held(&ath12k_ag_list_lock);
+
+	/* ag->ab[] can hold at most ATH12K_MAX_SOCS devices */
+	if (WARN_ON(max_devices > ATH12K_MAX_SOCS))
+		return NULL;
+
+	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+	if (!ag)
+		return NULL;
+
+	ag->id = id;
+	ag->num_devices = max_devices;
+
+	/* Fully initialize the group before publishing it on the list */
+	mutex_init(&ag->mutex_lock);
+	list_add(&ag->list, &ath12k_hw_group_list);
+
+	return ag;
+}
+
+/* Unlink @ag from the global group list and free it. Callers must
+ * guarantee that no device still references the group.
+ */
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+	mutex_lock(&ath12k_ag_list_lock);
+
+	list_del(&ag->list);
+
+	mutex_unlock(&ath12k_ag_list_lock);
+
+	/* No need to hold the list lock for the teardown of the object
+	 * itself; it is no longer reachable.
+	 */
+	mutex_destroy(&ag->mutex_lock);
+	kfree(ag);
+}
+
+/* Attach @ab to a device group, allocating the group, and record the
+ * device's slot in ab->device_id. Must be called with
+ * ath12k_ag_list_lock held. Returns the group, or NULL on failure.
+ */
+static struct ath12k_hw_group *ath12k_core_assign_hw_group(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag;
+	/* u8 to match ath12k_hw_group::id and the alloc parameter */
+	u8 group_id = ATH12K_INVALID_GROUP_ID;
+
+	lockdep_assert_held(&ath12k_ag_list_lock);
+
+	/* The grouping of multiple devices will be done based on device tree file.
+	 * TODO: device tree file parsing to know about the devices involved in group.
+	 *
+	 * The platforms that do not have any valid group information would have each
+	 * device to be part of its own invalid group.
+	 *
+	 * Currently, we are not parsing any device tree information and hence, grouping
+	 * of multiple devices is not involved. Thus, single device is added to device
+	 * group.
+	 */
+	ag = ath12k_core_hw_group_alloc(group_id, 1);
+	if (!ag) {
+		ath12k_warn(ab, "unable to create new hw group\n");
+		return NULL;
 	}
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "Single device is added to hardware group\n");
-	ret = ath12k_core_panic_notifier_register(ab);
-	if (ret)
-		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
+	ab->device_id = ag->num_probed++;
+	ag->ab[ab->device_id] = ab;
+	ab->ag = ag;
-	return 0;
+	return ag;
 }
-void ath12k_core_deinit(struct ath12k_base *ab)
+/* Detach @ab from its hardware group: clear the group's slot for this
+ * device and free the group once the last probed device has been
+ * unassigned. Safe to call when no group is assigned.
+ */
+void ath12k_core_unassign_hw_group(struct ath12k_base *ab)
 {
-	ath12k_core_panic_notifier_unregister(ab);
+	struct ath12k_hw_group *ag = ab->ag;
+	u8 device_id = ab->device_id;
+	int num_probed;
+
+	if (!ag)
+		return;
+
+	mutex_lock(&ag->mutex_lock);
+
+	/* Sanity: the slot recorded in ab must still refer back to ab */
+	if (WARN_ON(device_id >= ag->num_devices)) {
+		mutex_unlock(&ag->mutex_lock);
+		return;
+	}
+
+	if (WARN_ON(ag->ab[device_id] != ab)) {
+		mutex_unlock(&ag->mutex_lock);
+		return;
+	}
+
+	ag->ab[device_id] = NULL;
+	ab->ag = NULL;
+	ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+	if (ag->num_probed)
+		ag->num_probed--;
+
+	/* Snapshot under the lock; the group may not be touched after the
+	 * unlock below once it is freed.
+	 */
+	num_probed = ag->num_probed;
+
+	mutex_unlock(&ag->mutex_lock);
+
+	/* Last device out tears the group down. NOTE(review): nothing
+	 * prevents a concurrent lookup of this group between the unlock
+	 * and the free - currently safe only because groups are never
+	 * shared or looked up; revisit when group lookup is added.
+	 */
+	if (!num_probed)
+		ath12k_core_hw_group_free(ag);
+}
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
mutex_lock(&ab->core_lock);
ath12k_hif_irq_disable(ab);
@@ -1268,8 +1362,116 @@ void ath12k_core_deinit(struct ath12k_base *ab)
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
+}
+
+/* Destroy the SoC state of every device in the group; counterpart of
+ * ath12k_core_hw_group_create(). NOTE(review): runs without holding
+ * ag->mutex_lock - confirm callers serialize this against probe/remove.
+ */
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
-	ath12k_core_soc_destroy(ab);
+	if (WARN_ON(!ag))
+		return;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		ath12k_core_soc_destroy(ab);
+	}
+}
+
+/* Stop every probed device of the group. Runs under ag->mutex_lock to
+ * serialize against group create/assign; the per-device teardown is
+ * further guarded by ab->core_lock inside ath12k_core_device_cleanup().
+ * Tolerates a NULL group and unpopulated slots.
+ */
+static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	if (!ag)
+		return;
+
+	mutex_lock(&ag->mutex_lock);
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		ath12k_core_device_cleanup(ab);
+	}
+	mutex_unlock(&ag->mutex_lock);
+}
+
+/* Create SoC state for every probed device of the group, each under its
+ * own ab->core_lock. Returns 0 on success or the first failing device's
+ * error code; devices already created are NOT unwound here (the caller
+ * uses ath12k_core_hw_group_destroy() on failure).
+ */
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+	int i, ret;
+	struct ath12k_base *ab;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+		ret = ath12k_core_soc_create(ab);
+		if (ret) {
+			mutex_unlock(&ab->core_lock);
+			ath12k_err(ab, "failed to create soc core: %d\n", ret);
+			return ret;
+		}
+		mutex_unlock(&ab->core_lock);
+	}
+
+	return 0;
+}
+
+/* Initialize the core for one device: register the panic notifier,
+ * attach the device to a hardware group and, once every device of the
+ * group has probed, create the SoC state of all grouped devices.
+ *
+ * On failure the panic notifier is unregistered again - core_deinit()
+ * is not called for a device whose init failed, and leaving the
+ * notifier registered would leave it pointing at a freed ab.
+ */
+int ath12k_core_init(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag;
+	int ret;
+
+	ret = ath12k_core_panic_notifier_register(ab);
+	if (ret)
+		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
+	mutex_lock(&ath12k_ag_list_lock);
+	ag = ath12k_core_assign_hw_group(ab);
+	mutex_unlock(&ath12k_ag_list_lock);
+	if (!ag) {
+		ath12k_warn(ab, "unable to get hw group\n");
+		ret = -ENODEV;
+		goto err_unregister_notifier;
+	}
+
+	mutex_lock(&ag->mutex_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices in group %d, num probed %d\n",
+		   ag->num_devices, ag->num_probed);
+
+	/* Defer SoC creation until the last device of the group probes */
+	if (ath12k_core_hw_group_create_ready(ag)) {
+		ret = ath12k_core_hw_group_create(ag);
+		if (ret) {
+			mutex_unlock(&ag->mutex_lock);
+			ath12k_warn(ab, "unable to create hw group\n");
+			goto err_destroy_group;
+		}
+	}
+	mutex_unlock(&ag->mutex_lock);
+
+	return 0;
+
+err_destroy_group:
+	/* NOTE(review): this also invokes soc destroy for devices whose soc
+	 * create never ran or failed - confirm ath12k_core_soc_destroy()
+	 * tolerates that.
+	 */
+	ath12k_core_hw_group_destroy(ab->ag);
+	ath12k_core_unassign_hw_group(ab);
+err_unregister_notifier:
+	ath12k_core_panic_notifier_unregister(ab);
+	return ret;
+}
+
+/* Per-device deinit: unhook the panic notifier, then run group-wide
+ * stop/destroy and detach this device from its group.
+ *
+ * NOTE(review): cleanup/destroy iterate over ALL devices of the group,
+ * so with multi-device groups the first device's deinit would tear the
+ * whole group down - currently safe since each group holds one device.
+ */
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+	ath12k_core_panic_notifier_unregister(ab);
+	ath12k_core_hw_group_cleanup(ab->ag);
+	ath12k_core_hw_group_destroy(ab->ag);
+	ath12k_core_unassign_hw_group(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -61,6 +61,10 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_MAX_SOCS 3
+#define ATH12K_INVALID_GROUP_ID 0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -740,6 +744,25 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
+/* Holds info on the group of devices that are registered as a single wiphy */
+struct ath12k_hw_group {
+	/* Entry in the global ath12k_hw_group_list */
+	struct list_head list;
+	/* Group id; ATH12K_INVALID_GROUP_ID when no grouping info exists */
+	u8 id;
+	/* Number of devices expected to be part of this group */
+	u8 num_devices;
+	/* Number of devices that have probed so far */
+	u8 num_probed;
+	/* Member devices, indexed by ath12k_base::device_id */
+	struct ath12k_base *ab[ATH12K_MAX_SOCS];
+
+	/* When multiple devices are involved in a group, QMI handshakes would be
+	 * asynchronous between them but the group has to exchange few information
+	 * in QMI/WMI such as partner device details only after all the devices in
+	 * the group are ready.
+	 *
+	 * To synchronize the device group's create, assign, start, stop below
+	 * lock would be helpful
+	 */
+	struct mutex mutex_lock;
+};
+
/**
* enum ath12k_link_capable_flags - link capable flags
*
@@ -944,6 +967,8 @@ struct ath12k_base {
struct notifier_block panic_nb;
+ struct ath12k_hw_group *ag;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -974,6 +999,7 @@ int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_unassign_hw_group(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -1538,6 +1538,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
+ ath12k_core_unassign_hw_group(ab);
goto qmi_fail;
}