
[09/18] net: emac, cpsw: fix mixed module-builtin object (davinci_cpdma)

Message ID 20221119225650.1044591-10-alobakin@pm.me (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Series treewide: fix object files shared between several modules

Checks

Context                Check    Description
netdev/tree_selection  success  Guessing tree name failed - patch did not apply

Commit Message

Alexander Lobakin Nov. 19, 2022, 11:07 p.m. UTC
From: Masahiro Yamada <masahiroy@kernel.org>

CONFIG_TI_DAVINCI_EMAC, CONFIG_TI_CPSW and CONFIG_TI_CPSW_SWITCHDEV
are all tristate. This means that davinci_cpdma.o can be linked both
into a module and into vmlinux, even though the expected CFLAGS differ
between built-in objects and modules.

This is the same situation as fixed by commit 637a642f5ca5 ("zstd:
Fixing mixed module-builtin objects").

Introduce the new module, ti_davinci_cpdma, to provide the common
functions to these three modules.

[ alobakin: add exports ]

Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
Reviewed-by: Alexander Lobakin <alobakin@pm.me>
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 drivers/net/ethernet/ti/Kconfig         |  6 +++++
 drivers/net/ethernet/ti/Makefile        |  8 +++---
 drivers/net/ethernet/ti/cpsw.c          |  2 ++
 drivers/net/ethernet/ti/cpsw_new.c      |  2 ++
 drivers/net/ethernet/ti/davinci_cpdma.c | 33 +++++++++++++++++++++++++
 drivers/net/ethernet/ti/davinci_emac.c  |  2 ++
 6 files changed, 50 insertions(+), 3 deletions(-)

--
2.38.1
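
The pattern the patch applies is small: the shared object exports its API
into a dedicated symbol namespace, and every module that uses it has to
import that namespace. A minimal sketch follows (illustrative only, not part
of the patch; cpdma_example_op is a placeholder standing in for the real
cpdma_* functions):

/* Provider side: davinci_cpdma.c, now built as ti_davinci_cpdma.ko */
#include <linux/module.h>

int cpdma_example_op(void);	/* placeholder for the real cpdma_* API */

int cpdma_example_op(void)
{
	return 0;
}
/* Export into the TI_DAVINCI_CPDMA namespace rather than the global one */
EXPORT_SYMBOL_NS_GPL(cpdma_example_op, TI_DAVINCI_CPDMA);

MODULE_DESCRIPTION("TI CPDMA driver");
MODULE_LICENSE("GPL");

/* Consumer side: cpsw.c, cpsw_new.c and davinci_emac.c each add */
MODULE_IMPORT_NS(TI_DAVINCI_CPDMA);	/* needed to resolve the namespaced symbols */

Because TI_DAVINCI_CPDMA is a hidden tristate selected by the three drivers,
davinci_cpdma.o is compiled exactly once, either built-in or modular,
instead of being linked into several modules (and possibly vmlinux) with
mismatched flags.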

Comments

Masahiro Yamada Nov. 23, 2022, 4:04 p.m. UTC | #1
On Sun, Nov 20, 2022 at 8:07 AM Alexander Lobakin <alobakin@pm.me> wrote:
>
> From: Masahiro Yamada <masahiroy@kernel.org>
>
> CONFIG_TI_DAVINCI_EMAC, CONFIG_TI_CPSW and CONFIG_TI_CPSW_SWITCHDEV
> are all tristate. This means that davinci_cpdma.o can be linked both
> into a module and into vmlinux, even though the expected CFLAGS differ
> between built-in objects and modules.
>
> This is the same situation as fixed by commit 637a642f5ca5 ("zstd:
> Fixing mixed module-builtin objects").
>
> Introduce the new module, ti_davinci_cpdma, to provide the common
> functions to these three modules.
>
> [ alobakin: add exports ]
>
> Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
> Reviewed-by: Alexander Lobakin <alobakin@pm.me>
> Signed-off-by: Alexander Lobakin <alobakin@pm.me>
> ---

Please take authorship of this patch, because I did not finish it
(and I am not sure this is the correct way to fix it).

As patch 18/18 will touch this part again, perhaps davinci_cpdma.c
could go into ti_cpsw_core.ko.

Anyway, the maintainer may have a better insight.

Patch

diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index fce06663e1e1..2ac0adf0c07d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -17,9 +17,13 @@  config NET_VENDOR_TI

 if NET_VENDOR_TI

+config TI_DAVINCI_CPDMA
+	tristate
+
 config TI_DAVINCI_EMAC
 	tristate "TI DaVinci EMAC Support"
 	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
+	select TI_DAVINCI_CPDMA
 	select TI_DAVINCI_MDIO
 	select PHYLIB
 	select GENERIC_ALLOCATOR
@@ -51,6 +55,7 @@  config TI_CPSW
 	tristate "TI CPSW Switch Support"
 	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
 	depends on TI_CPTS || !TI_CPTS
+	select TI_DAVINCI_CPDMA
 	select TI_DAVINCI_MDIO
 	select MFD_SYSCON
 	select PAGE_POOL
@@ -68,6 +73,7 @@  config TI_CPSW_SWITCHDEV
 	depends on NET_SWITCHDEV
 	depends on TI_CPTS || !TI_CPTS
 	select PAGE_POOL
+	select TI_DAVINCI_CPDMA
 	select TI_DAVINCI_MDIO
 	select MFD_SYSCON
 	select REGMAP
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 75f761efbea7..28a741ed0ac8 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -9,15 +9,17 @@  obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o

 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += ti_davinci_cpdma.o
+ti_davinci_cpdma-y := davinci_cpdma.o
 obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
-ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
+ti_davinci_emac-y := davinci_emac.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPTS) += cpts.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
+ti_cpsw-y := cpsw.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
 obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
-ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o
+ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o

 obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
 keystone_netcp-y := netcp_core.o cpsw_ale.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 13c9c2d6b79b..b7ac61329b20 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1796,6 +1796,8 @@  static struct platform_driver cpsw_driver = {

 module_platform_driver(cpsw_driver);

+MODULE_IMPORT_NS(TI_DAVINCI_CPDMA);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 83596ec0c7cb..9ed398c04c04 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -2116,5 +2116,7 @@  static struct platform_driver cpsw_driver = {

 module_platform_driver(cpsw_driver);

+MODULE_IMPORT_NS(TI_DAVINCI_CPDMA);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d2eab5cd1e0c..32ba94626cda 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -531,6 +531,7 @@  struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
 	return ctlr;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctlr_create, TI_DAVINCI_CPDMA);

 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 {
@@ -591,6 +592,7 @@  int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctlr_start, TI_DAVINCI_CPDMA);

 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 {
@@ -623,6 +625,7 @@  int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctlr_stop, TI_DAVINCI_CPDMA);

 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 {
@@ -640,6 +643,7 @@  int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 	cpdma_desc_pool_destroy(ctlr);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctlr_destroy, TI_DAVINCI_CPDMA);

 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 {
@@ -660,21 +664,25 @@  int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctlr_int_ctrl, TI_DAVINCI_CPDMA);

 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
 {
 	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctlr_eoi, TI_DAVINCI_CPDMA);

 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
 {
 	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctrl_rxchs_state, TI_DAVINCI_CPDMA);

 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
 {
 	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_ctrl_txchs_state, TI_DAVINCI_CPDMA);

 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
 				 int rx, int desc_num,
@@ -802,6 +810,7 @@  int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_set_weight, TI_DAVINCI_CPDMA);

 /* cpdma_chan_get_min_rate - get minimum allowed rate for channel
  * Should be called before cpdma_chan_set_rate.
@@ -816,6 +825,7 @@  u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)

 	return DIV_ROUND_UP(divident, divisor);
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_get_min_rate, TI_DAVINCI_CPDMA);

 /* cpdma_chan_set_rate - limits bandwidth for transmit channel.
  * The bandwidth * limited channels have to be in order beginning from lowest.
@@ -860,6 +870,7 @@  int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_set_rate, TI_DAVINCI_CPDMA);

 u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
 {
@@ -872,6 +883,7 @@  u32 cpdma_chan_get_rate(struct cpdma_chan *ch)

 	return rate;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_get_rate, TI_DAVINCI_CPDMA);

 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 				     cpdma_handler_fn handler, int rx_type)
@@ -931,6 +943,7 @@  struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return chan;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_create, TI_DAVINCI_CPDMA);

 int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
 {
@@ -943,6 +956,7 @@  int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)

 	return desc_num;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_get_rx_buf_num, TI_DAVINCI_CPDMA);

 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
@@ -964,6 +978,7 @@  int cpdma_chan_destroy(struct cpdma_chan *chan)
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_destroy, TI_DAVINCI_CPDMA);

 int cpdma_chan_get_stats(struct cpdma_chan *chan,
 			 struct cpdma_chan_stats *stats)
@@ -976,6 +991,7 @@  int cpdma_chan_get_stats(struct cpdma_chan *chan,
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_get_stats, TI_DAVINCI_CPDMA);

 static void __cpdma_chan_submit(struct cpdma_chan *chan,
 				struct cpdma_desc __iomem *desc)
@@ -1100,6 +1116,7 @@  int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_idle_submit, TI_DAVINCI_CPDMA);

 int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
 				  dma_addr_t data, int len, int directed)
@@ -1125,6 +1142,7 @@  int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_idle_submit_mapped, TI_DAVINCI_CPDMA);

 int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		      int len, int directed)
@@ -1150,6 +1168,7 @@  int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_submit, TI_DAVINCI_CPDMA);

 int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
 			     dma_addr_t data, int len, int directed)
@@ -1175,6 +1194,7 @@  int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_submit_mapped, TI_DAVINCI_CPDMA);

 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
 {
@@ -1189,6 +1209,7 @@  bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return free_tx_desc;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_check_free_tx_desc, TI_DAVINCI_CPDMA);

 static void __cpdma_chan_free(struct cpdma_chan *chan,
 			      struct cpdma_desc __iomem *desc,
@@ -1289,6 +1310,7 @@  int cpdma_chan_process(struct cpdma_chan *chan, int quota)
 	}
 	return used;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_process, TI_DAVINCI_CPDMA);

 int cpdma_chan_start(struct cpdma_chan *chan)
 {
@@ -1308,6 +1330,7 @@  int cpdma_chan_start(struct cpdma_chan *chan)

 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_start, TI_DAVINCI_CPDMA);

 int cpdma_chan_stop(struct cpdma_chan *chan)
 {
@@ -1370,6 +1393,7 @@  int cpdma_chan_stop(struct cpdma_chan *chan)
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_stop, TI_DAVINCI_CPDMA);

 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
 {
@@ -1387,6 +1411,7 @@  int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)

 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_chan_int_ctrl, TI_DAVINCI_CPDMA);

 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
 {
@@ -1399,6 +1424,7 @@  int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)

 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_control_get, TI_DAVINCI_CPDMA);

 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
 {
@@ -1411,16 +1437,19 @@  int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)

 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_control_set, TI_DAVINCI_CPDMA);

 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
 {
 	return ctlr->num_rx_desc;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_get_num_rx_descs, TI_DAVINCI_CPDMA);

 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
 {
 	return ctlr->num_tx_desc;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_get_num_tx_descs, TI_DAVINCI_CPDMA);

 int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
 {
@@ -1442,3 +1471,7 @@  int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)

 	return ret;
 }
+EXPORT_SYMBOL_NS_GPL(cpdma_set_num_rx_descs, TI_DAVINCI_CPDMA);
+
+MODULE_DESCRIPTION("TI CPDMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2eb9d5a32588..897def12e6ec 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2103,6 +2103,8 @@  static void __exit davinci_emac_exit(void)
 }
 module_exit(davinci_emac_exit);

+MODULE_IMPORT_NS(TI_DAVINCI_CPDMA);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>");
 MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>");