From patchwork Sat Jan 28 14:06:15 2023
X-Patchwork-Submitter: Lorenzo Bianconi
X-Patchwork-Id: 13119856
X-Patchwork-Delegate: bpf@iogearbox.net
From: Lorenzo Bianconi
To: bpf@vger.kernel.org
Cc: netdev@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net,
	andrii@kernel.org, davem@davemloft.net, kuba@kernel.org,
	pabeni@redhat.com, edumazet@google.com, hawk@kernel.org,
	toke@redhat.com, memxor@gmail.com, alardam@gmail.com,
	saeedm@nvidia.com, anthony.l.nguyen@intel.com, gospo@broadcom.com,
	vladimir.oltean@nxp.com, nbd@nbd.name, john@phrozen.org,
	leon@kernel.org, simon.horman@corigine.com, aelior@marvell.com,
	christophe.jaillet@wanadoo.fr, ecree.xilinx@gmail.com,
	mst@redhat.com, bjorn@kernel.org, magnus.karlsson@intel.com,
	maciej.fijalkowski@intel.com, intel-wired-lan@lists.osuosl.org,
	lorenzo.bianconi@redhat.com, martin.lau@linux.dev, sdf@google.com
Subject: [PATCH v4 bpf-next 4/8] libbpf: add the capability to specify
 netlink proto in libbpf_netlink_send_recv
Date: Sat, 28 Jan 2023 15:06:15 +0100
Message-Id:
X-Mailer: git-send-email 2.39.1
In-Reply-To:
References:
X-Mailing-List: netdev@vger.kernel.org

This is a preliminary patch to introduce NETLINK_GENERIC protocol support
in libbpf: add a proto argument to libbpf_netlink_open() and
libbpf_netlink_send_recv() so that callers can select the netlink protocol
instead of always relying on the hard-coded NETLINK_ROUTE one.
Signed-off-by: Lorenzo Bianconi
---
 tools/lib/bpf/netlink.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 35104580870c..d2468a04a6c3 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -41,7 +41,7 @@ struct xdp_id_md {
 	struct xdp_link_info info;
 };
 
-static int libbpf_netlink_open(__u32 *nl_pid)
+static int libbpf_netlink_open(__u32 *nl_pid, int proto)
 {
 	struct sockaddr_nl sa;
 	socklen_t addrlen;
@@ -51,7 +51,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
 	memset(&sa, 0, sizeof(sa));
 	sa.nl_family = AF_NETLINK;
 
-	sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+	sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto);
 	if (sock < 0)
 		return -errno;
 
@@ -212,14 +212,14 @@ static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
 }
 
 static int libbpf_netlink_send_recv(struct libbpf_nla_req *req,
-				    __dump_nlmsg_t parse_msg,
+				    int proto, __dump_nlmsg_t parse_msg,
 				    libbpf_dump_nlmsg_t parse_attr,
 				    void *cookie)
 {
 	__u32 nl_pid = 0;
 	int sock, ret;
 
-	sock = libbpf_netlink_open(&nl_pid);
+	sock = libbpf_netlink_open(&nl_pid, proto);
 	if (sock < 0)
 		return sock;
 
@@ -271,7 +271,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
 	}
 	nlattr_end_nested(&req, nla);
 
-	return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+	return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
 }
 
 int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts)
@@ -382,7 +382,7 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
 	xdp_id.ifindex = ifindex;
 	xdp_id.flags = xdp_flags;
 
-	err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg,
+	err = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, __dump_link_nlmsg,
 				       get_xdp_info, &xdp_id);
 	if (err)
 		return libbpf_err(err);
@@ -493,7 +493,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
 	if (ret < 0)
 		return ret;
 
-	return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+	return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
 }
 
 static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
@@ -673,7 +673,8 @@ int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
 
 	info.opts = opts;
 
-	ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+	ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+				       &info);
 	if (ret < 0)
 		return libbpf_err(ret);
 	if (!info.processed)
@@ -739,7 +740,7 @@ static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
 		return ret;
 	}
 
-	return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+	return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
 }
 
 int bpf_tc_detach(const struct bpf_tc_hook *hook,
@@ -804,7 +805,8 @@ int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
 
 	info.opts = opts;
 
-	ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+	ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+				       &info);
 	if (ret < 0)
 		return libbpf_err(ret);
 	if (!info.processed)
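
For context: the new proto argument leaves the existing XDP and TC paths on
NETLINK_ROUTE and only moves the protocol choice to the call site. The sketch
below is illustrative and not part of this patch; the helper name
probe_genl_family() is hypothetical, and it assumes a follow-up change lets
struct libbpf_nla_req carry a generic netlink request:

static int probe_genl_family(struct libbpf_nla_req *req,
			     __dump_nlmsg_t parse_msg, void *cookie)
{
	/* Reuse the same socket setup and send/receive loop as the
	 * rtnetlink users above; only the protocol handed down to
	 * socket() via libbpf_netlink_open() differs.
	 */
	return libbpf_netlink_send_recv(req, NETLINK_GENERIC, parse_msg,
					NULL, cookie);
}

Threading the protocol through libbpf_netlink_send_recv() keeps each request
self-describing, so the shared open/send/recv code does not need to be
duplicated per netlink family.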