From patchwork Wed Jun 8 20:04:47 2022
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 12874622
From: Saeed Mahameed
To: Leon Romanovsky, Saeed Mahameed
Cc: Jason Gunthorpe, "David S. Miller", Jakub Kicinski, Paolo Abeni,
    Eric Dumazet, netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
    Jianbo Liu, Ariel Levkovich
Subject: [PATCH mlx5-next 1/6] net/mlx5: Add IFC bits and enums for flow meter
Date: Wed, 8 Jun 2022 13:04:47 -0700
Message-Id: <20220608200452.43880-2-saeed@kernel.org>
In-Reply-To: <20220608200452.43880-1-saeed@kernel.org>
References: <20220608200452.43880-1-saeed@kernel.org>

From: Jianbo Liu

Add/extend structure layouts and defines for flow meter.
Signed-off-by: Jianbo Liu
Reviewed-by: Ariel Levkovich
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/device.h   |   1 +
 include/linux/mlx5/mlx5_ifc.h | 114 ++++++++++++++++++++++++++++++++--
 2 files changed, 111 insertions(+), 4 deletions(-)

diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 604b85dd770a..15ac02eeed4f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -455,6 +455,7 @@ enum {
 	MLX5_OPCODE_UMR			= 0x25,
 
+	MLX5_OPCODE_ACCESS_ASO		= 0x2d,
 };
 
 enum {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index fd7d083a34d3..a81f86620a10 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -442,7 +442,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         max_modify_header_actions[0x8];
 	u8         max_ft_level[0x8];
 
-	u8         reserved_at_40[0x20];
+	u8         reserved_at_40[0x6];
+	u8         execute_aso[0x1];
+	u8         reserved_at_47[0x19];
 
 	u8         reserved_at_60[0x2];
 	u8         reformat_insert[0x1];
@@ -940,7 +942,17 @@ struct mlx5_ifc_qos_cap_bits {
 
 	u8         max_tsar_bw_share[0x20];
 
-	u8         reserved_at_100[0x700];
+	u8         reserved_at_100[0x20];
+
+	u8         reserved_at_120[0x3];
+	u8         log_meter_aso_granularity[0x5];
+	u8         reserved_at_128[0x3];
+	u8         log_meter_aso_max_alloc[0x5];
+	u8         reserved_at_130[0x3];
+	u8         log_max_num_meter_aso[0x5];
+	u8         reserved_at_138[0x8];
+
+	u8         reserved_at_140[0x6c0];
 };
 
 struct mlx5_ifc_debug_cap_bits {
@@ -3277,6 +3289,7 @@ enum {
 	MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2   = 0x800,
 	MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
 	MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
+	MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO   = 0x4000,
 };
 
 enum {
@@ -3292,6 +3305,38 @@ struct mlx5_ifc_vlan_bits {
 	u8         vid[0xc];
 };
 
+enum {
+	MLX5_FLOW_METER_COLOR_RED	= 0x0,
+	MLX5_FLOW_METER_COLOR_YELLOW	= 0x1,
+	MLX5_FLOW_METER_COLOR_GREEN	= 0x2,
+	MLX5_FLOW_METER_COLOR_UNDEFINED	= 0x3,
+};
+
+enum {
+	MLX5_EXE_ASO_FLOW_METER		= 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+	u8         return_reg_id[0x4];
+	u8         aso_type[0x4];
+	u8         reserved_at_8[0x14];
+	u8         action[0x1];
+	u8         init_color[0x2];
+	u8         meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+	struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+	u8         valid[0x1];
+	u8         reserved_at_1[0x7];
+	u8         aso_object_id[0x18];
+
+	union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
+};
+
 struct mlx5_ifc_flow_context_bits {
 	struct mlx5_ifc_vlan_bits push_vlan;
 
@@ -3323,7 +3368,9 @@ struct mlx5_ifc_flow_context_bits {
 
 	struct mlx5_ifc_fte_match_param_bits match_value;
 
-	u8         reserved_at_1200[0x600];
+	struct mlx5_ifc_execute_aso_bits execute_aso[4];
+
+	u8         reserved_at_1300[0x500];
 
 	union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
 };
@@ -5970,7 +6017,9 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
 
 	u8         obj_id[0x20];
 
-	u8         reserved_at_60[0x20];
+	u8         reserved_at_60[0x3];
+	u8         log_obj_range[0x5];
+	u8         reserved_at_68[0x18];
 };
 
 struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
@@ -11370,12 +11419,14 @@ enum {
 	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
 	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
 	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
+	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
 };
 
 enum {
 	MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
 	MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
 	MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
+	MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
 };
 
 enum {
@@ -11448,6 +11499,61 @@ struct mlx5_ifc_create_encryption_key_in_bits {
 	struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
 };
 
+enum {
+	MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH		= 0x0,
+	MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2		= 0x1,
+	MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG	= 0x2,
+	MLX5_FLOW_METER_MODE_NUM_PACKETS		= 0x3,
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+	u8         valid[0x1];
+	u8         bucket_overflow[0x1];
+	u8         start_color[0x2];
+	u8         both_buckets_on_green[0x1];
+	u8         reserved_at_5[0x1];
+	u8         meter_mode[0x2];
+	u8         reserved_at_8[0x18];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x3];
+	u8         cbs_exponent[0x5];
+	u8         cbs_mantissa[0x8];
+	u8         reserved_at_50[0x3];
+	u8         cir_exponent[0x5];
+	u8         cir_mantissa[0x8];
+
+	u8         reserved_at_60[0x20];
+
+	u8         reserved_at_80[0x3];
+	u8         ebs_exponent[0x5];
+	u8         ebs_mantissa[0x8];
+	u8         reserved_at_90[0x3];
+	u8         eir_exponent[0x5];
+	u8         eir_mantissa[0x8];
+
+	u8         reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_flow_meter_aso_obj_bits {
+	u8         modify_field_select[0x40];
+
+	u8         reserved_at_40[0x40];
+
+	u8         reserved_at_80[0x8];
+	u8         meter_aso_access_pd[0x18];
+
+	u8         reserved_at_a0[0x160];
+
+	struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
+};
+
+struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
+	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+	struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
+};
+
 struct mlx5_ifc_sampler_obj_bits {
 	u8         modify_field_select[0x40];
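[Annotation, not part of the patch] The new object type and layouts above are enough to allocate a flow meter ASO object through the general object command interface. The sketch below illustrates that flow under stated assumptions: the helper name create_flow_meter_aso_obj is hypothetical, a log_obj_range of 0 (a single object) and the error handling are illustrative choices, and only field/opcode names taken from mlx5_ifc.h are assumed to exist.

/* Sketch only: allocate one flow meter ASO object and return its id.
 * The helper itself is hypothetical; layouts/opcodes are from mlx5_ifc.h.
 */
static int create_flow_meter_aso_obj(struct mlx5_core_dev *mdev, u32 pdn,
				     u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *obj;
	int err;

	/* General object header: create a FLOW_METER_ASO object. */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
	MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, 0); /* one object */

	/* Object payload: bind the meter ASO to a protection domain. */
	obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in,
			   flow_meter_aso_obj);
	MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, pdn);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}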
From patchwork Wed Jun 8 20:04:48 2022
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 12874621
From: Saeed Mahameed
To: Leon Romanovsky, Saeed Mahameed
Cc: Jason Gunthorpe, "David S. Miller", Jakub Kicinski, Paolo Abeni,
    Eric Dumazet, netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
    Michael Guralnik
Subject: [PATCH mlx5-next 2/6] net/mlx5: Add HW definitions of vport debug counters
Date: Wed, 8 Jun 2022 13:04:48 -0700
Message-Id: <20220608200452.43880-3-saeed@kernel.org>
In-Reply-To: <20220608200452.43880-1-saeed@kernel.org>
References: <20220608200452.43880-1-saeed@kernel.org>

From: Saeed Mahameed

total_q_under_processor_handle - number of queues in error state due to an
async error or errored command.
send_queue_priority_update_flow - number of QP/SQ priority/SL update events.
cq_overrun - number of times a CQ entered an error state due to an overflow.
async_eq_overrun - number of times an EQ mapped to async events was overrun.
comp_eq_overrun - number of times an EQ mapped to completion events was
overrun.
quota_exceeded_command - number of commands issued and failed due to quota
exceeded.
invalid_command - number of commands issued and failed due to any reason
other than quota exceeded.

Signed-off-by: Saeed Mahameed
Signed-off-by: Michael Guralnik
---
 include/linux/mlx5/mlx5_ifc.h | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a81f86620a10..585d246cef3b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1438,7 +1438,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_120[0xa];
 	u8         log_max_ra_req_dc[0x6];
 
-	u8         reserved_at_130[0xa];
+	u8         reserved_at_130[0x9];
+	u8         vnic_env_cq_overrun[0x1];
 	u8         log_max_ra_res_dc[0x6];
 
 	u8         reserved_at_140[0x5];
@@ -1633,7 +1634,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         nic_receive_steering_discard[0x1];
 	u8         receive_discard_vport_down[0x1];
 	u8         transmit_discard_vport_down[0x1];
-	u8         reserved_at_343[0x5];
+	u8         eq_overrun_count[0x1];
+	u8         reserved_at_344[0x1];
+	u8         invalid_command_count[0x1];
+	u8         quota_exceeded_count[0x1];
+	u8         reserved_at_347[0x1];
 	u8         log_max_flow_counter_bulk[0x8];
 	u8         max_flow_counter_15_0[0x10];
@@ -3438,11 +3443,21 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
 
 	u8         transmit_discard_vport_down[0x40];
 
-	u8         reserved_at_140[0xa0];
+	u8         async_eq_overrun[0x20];
+
+	u8         comp_eq_overrun[0x20];
+
+	u8         reserved_at_180[0x20];
+
+	u8         invalid_command[0x20];
+
+	u8         quota_exceeded_command[0x20];
 
 	u8         internal_rq_out_of_buffer[0x20];
 
-	u8         reserved_at_200[0xe00];
+	u8         cq_overrun[0x20];
+
+	u8         reserved_at_220[0xde0];
 };
 
 struct mlx5_ifc_traffic_counter_bits {
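[Annotation, not part of the patch] The new fields extend mlx5_ifc_vnic_diagnostic_statistics_bits, which the device returns via the QUERY_VNIC_ENV command. A rough sketch of such a query follows; it is not code from this series, and the vport_env field path in the output layout and the choice of counters shown are assumptions made for illustration.

/* Sketch only: read two of the new vnic_env debug counters.
 * Assumes the corresponding capability bits (eq_overrun_count,
 * vnic_env_cq_overrun) were checked by the caller.
 */
static int query_vnic_env_debug_counters(struct mlx5_core_dev *mdev,
					 u32 *cq_overrun,
					 u32 *async_eq_overrun)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* Counters live inside the vnic_diagnostic_statistics block. */
	*cq_overrun = MLX5_GET(query_vnic_env_out, out,
			       vport_env.cq_overrun);
	*async_eq_overrun = MLX5_GET(query_vnic_env_out, out,
				     vport_env.async_eq_overrun);
	return 0;
}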
From patchwork Wed Jun 8 20:04:49 2022
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 12874624
From: Saeed Mahameed
To: Leon Romanovsky, Saeed Mahameed
Cc: Jason Gunthorpe, "David S. Miller", Jakub Kicinski, Paolo Abeni,
    Eric Dumazet, netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
    Jianbo Liu, Roi Dayan
Subject: [PATCH mlx5-next 3/6] net/mlx5: Add support EXECUTE_ASO action for flow entry
Date: Wed, 8 Jun 2022 13:04:49 -0700
Message-Id: <20220608200452.43880-4-saeed@kernel.org>
In-Reply-To: <20220608200452.43880-1-saeed@kernel.org>
References: <20220608200452.43880-1-saeed@kernel.org>

From: Jianbo Liu

Attach flow meter to FTE with object id and index. Use metadata register C5
to store the packet color meter result.
Signed-off-by: Jianbo Liu
Reviewed-by: Roi Dayan
Signed-off-by: Saeed Mahameed
---
 .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 33 +++++++++++++++++++
 include/linux/mlx5/fs.h                      | 14 ++++++++
 2 files changed, 47 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 2ccf7bef9b05..735dc805dad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -479,6 +479,30 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
 	return 0;
 }
 
+static void
+mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
+{
+	void *exe_aso_ctrl;
+	void *execute_aso;
+
+	execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
+				   execute_aso[0]);
+	MLX5_SET(execute_aso, execute_aso, valid, 1);
+	MLX5_SET(execute_aso, execute_aso, aso_object_id,
+		 fte->action.exe_aso.object_id);
+
+	exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
+	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
+		 fte->action.exe_aso.return_reg_id);
+	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
+		 fte->action.exe_aso.type);
+	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
+		 fte->action.exe_aso.flow_meter.init_color);
+	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
+		 fte->action.exe_aso.flow_meter.meter_idx);
+}
+
 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			    int opmod, int modify_mask,
 			    struct mlx5_flow_table *ft,
@@ -663,6 +687,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 					 list_size);
 	}
 
+	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+		if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+			mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
+		} else {
+			err = -EOPNOTSUPP;
+			goto err_out;
+		}
+	}
+
 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 err_out:
 	kvfree(in);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8135713b0d2d..ece3e35622d7 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -212,6 +212,19 @@ struct mlx5_flow_group *
 mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
 
+struct mlx5_exe_aso {
+	u32 object_id;
+	u8 type;
+	u8 return_reg_id;
+	union {
+		u32 ctrl_data;
+		struct {
+			u8 meter_idx;
+			u8 init_color;
+		} flow_meter;
+	};
+};
+
 struct mlx5_fs_vlan {
 	u16 ethtype;
 	u16 vid;
@@ -237,6 +250,7 @@ struct mlx5_flow_act {
 	struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
 	struct ib_counters *counters;
 	struct mlx5_flow_group *fg;
+	struct mlx5_exe_aso exe_aso;
 };
 
 #define MLX5_DECLARE_FLOW_ACT(name) \
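[Annotation, not part of the patch] With the IFC layouts from patch 1 and the mlx5_exe_aso field added here, a steering user requests metering on a rule roughly as follows. This is an illustrative sketch rather than code from the series: the helper name, the forward destination, and the register index 5 (metadata register C5, per the commit message) and object/index values are placeholders.

/* Sketch only: attach an already-created flow meter ASO object to a rule.
 * obj_id/meter_idx come from the flow meter object allocation (not shown).
 */
static struct mlx5_flow_handle *
add_metered_rule(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec,
		 struct mlx5_flow_destination *dest, u32 obj_id, u8 meter_idx)
{
	struct mlx5_flow_act flow_act = {};

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
	flow_act.exe_aso.type = MLX5_EXE_ASO_FLOW_METER;
	flow_act.exe_aso.object_id = obj_id;
	flow_act.exe_aso.return_reg_id = 5;	/* meter color in reg C5 (assumed) */
	flow_act.exe_aso.flow_meter.meter_idx = meter_idx;
	flow_act.exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;

	return mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
}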
From patchwork Wed Jun 8 20:04:50 2022
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 12874623
From: Saeed Mahameed
To: Leon Romanovsky, Saeed Mahameed
Cc: Jason Gunthorpe, "David S. Miller", Jakub Kicinski, Paolo Abeni,
    Eric Dumazet, netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
    Shay Drory
Subject: [PATCH mlx5-next 4/6] net/mlx5: group fdb cleanup to single function
Date: Wed, 8 Jun 2022 13:04:50 -0700
Message-Id: <20220608200452.43880-5-saeed@kernel.org>
In-Reply-To: <20220608200452.43880-1-saeed@kernel.org>
References: <20220608200452.43880-1-saeed@kernel.org>

From: Shay Drory

Currently, the allocation of fdb software objects is done in a single
function, as opposed to their cleanup. Group the cleanup of fdb software
objects into a single function.
Signed-off-by: Shay Drory
Signed-off-by: Saeed Mahameed
---
 .../net/ethernet/mellanox/mlx5/core/fs_core.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fdcf7f529330..14187e50e2f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2866,6 +2866,14 @@ static int create_fdb_bypass(struct mlx5_flow_steering *steering)
 	return 0;
 }
 
+static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+	cleanup_root_ns(steering->fdb_root_ns);
+	steering->fdb_root_ns = NULL;
+	kfree(steering->fdb_sub_ns);
+	steering->fdb_sub_ns = NULL;
+}
+
 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct fs_prio *maj_prio;
@@ -2916,10 +2924,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	return 0;
 
 out_err:
-	cleanup_root_ns(steering->fdb_root_ns);
-	kfree(steering->fdb_sub_ns);
-	steering->fdb_sub_ns = NULL;
-	steering->fdb_root_ns = NULL;
+	cleanup_fdb_root_ns(steering);
 	return err;
 }
 
@@ -3079,10 +3084,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
 	struct mlx5_flow_steering *steering = dev->priv.steering;
 
 	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->fdb_root_ns);
-	steering->fdb_root_ns = NULL;
-	kfree(steering->fdb_sub_ns);
-	steering->fdb_sub_ns = NULL;
+	cleanup_fdb_root_ns(steering);
 	cleanup_root_ns(steering->port_sel_root_ns);
 	cleanup_root_ns(steering->sniffer_rx_root_ns);
 	cleanup_root_ns(steering->sniffer_tx_root_ns);
From patchwork Wed Jun 8 20:04:51 2022
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 12874625
From: Saeed Mahameed
To: Leon Romanovsky, Saeed Mahameed
Cc: Jason Gunthorpe, "David S. Miller", Jakub Kicinski, Paolo Abeni,
    Eric Dumazet, netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
    Shay Drory
Subject: [PATCH mlx5-next 5/6] net/mlx5: Remove not used MLX5_CAP_BITS_RW_MASK
Date: Wed, 8 Jun 2022 13:04:51 -0700
Message-Id: <20220608200452.43880-6-saeed@kernel.org>
In-Reply-To: <20220608200452.43880-1-saeed@kernel.org>
References: <20220608200452.43880-1-saeed@kernel.org>

From: Shay Drory

Remove the unused MLX5_CAP_BITS_RW_MASK. While at it, remove CAP_MASK,
MLX5_CAP_OFF_CMDIF_CSUM and MLX5_DEV_CAP_FLAG_*, since MLX5_CAP_BITS_RW_MASK
was their only user.

Signed-off-by: Shay Drory
Signed-off-by: Saeed Mahameed
---
 .../net/ethernet/mellanox/mlx5/core/main.c |  7 -------
 include/linux/mlx5/device.h                | 19 -------------------
 2 files changed, 26 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c9b4e50a593e..2078d9f03a5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -314,13 +314,6 @@ struct mlx5_reg_host_endianness {
 	u8	rsvd[15];
 };
 
-#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
-
-enum {
-	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
-				MLX5_DEV_CAP_FLAG_DCT,
-};
-
 static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
 {
 	switch (size) {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 15ac02eeed4f..95a4fa0fd40a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -386,21 +386,6 @@ enum {
 	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
 };
 
-enum {
-	MLX5_DEV_CAP_FLAG_XRC		= 1LL << 3,
-	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL << 8,
-	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL << 9,
-	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
-	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
-	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
-	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
-	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
-	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
-	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
-	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
-	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
-};
-
 enum {
 	MLX5_ROCE_VERSION_1	= 0,
 	MLX5_ROCE_VERSION_2	= 2,
@@ -496,10 +481,6 @@ enum {
 	MLX5_MAX_PAGE_SHIFT	= 31
 };
 
-enum {
-	MLX5_CAP_OFF_CMDIF_CSUM	= 46,
-};
-
 enum {
 	/*
 	 * Max wqe size for rdma read is 512 bytes, so this
From patchwork Wed Jun 8 20:04:52 2022
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 12874626
From: Saeed Mahameed
To: Leon Romanovsky, Saeed Mahameed
Cc: Jason Gunthorpe, "David S. Miller", Jakub Kicinski, Paolo Abeni,
    Eric Dumazet, netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
    Ofer Levi
Subject: [PATCH mlx5-next 6/6] net/mlx5: Add bits and fields to support enhanced CQE compression
Date: Wed, 8 Jun 2022 13:04:52 -0700
Message-Id: <20220608200452.43880-7-saeed@kernel.org>
In-Reply-To: <20220608200452.43880-1-saeed@kernel.org>
References: <20220608200452.43880-1-saeed@kernel.org>

From: Ofer Levi

Expose IFC bits and add the structure fields and methods needed to support
the enhanced CQE compression feature. Enhanced CQE compression improves CPU
utilization and delivers better packet latency from NIC to host.
Signed-off-by: Ofer Levi
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/device.h   | 16 +++++++++++++++-
 include/linux/mlx5/mlx5_ifc.h |  7 +++++--
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 95a4fa0fd40a..b5f58fd37a0f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -822,7 +822,10 @@ struct mlx5_cqe64 {
 	__be32	timestamp_l;
 	__be32	sop_drop_qpn;
 	__be16	wqe_counter;
-	u8	signature;
+	union {
+		u8	signature;
+		u8	validity_iteration_count;
+	};
 	u8	op_own;
 };
 
@@ -854,6 +857,11 @@ enum {
 	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
 };
 
+enum {
+	MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
+	MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
+};
+
 #define MLX5_MINI_CQE_ARRAY_SIZE 8
 
 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
@@ -866,6 +874,12 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
 	return cqe->op_own >> 4;
 }
 
+static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
+{
+	/* num_of_mini_cqes is zero based */
+	return get_cqe_opcode(cqe) + 1;
+}
+
 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 {
 	return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 585d246cef3b..e01148781d57 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1736,7 +1736,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         log_max_dci_errored_streams[0x5];
 	u8         reserved_at_598[0x8];
 
-	u8         reserved_at_5a0[0x13];
+	u8         reserved_at_5a0[0x10];
+	u8         enhanced_cqe_compression[0x1];
+	u8         reserved_at_5b1[0x2];
 	u8         log_max_dek[0x5];
 	u8         reserved_at_5b8[0x4];
 	u8         mini_cqe_resp_stride_index[0x1];
@@ -4136,7 +4138,8 @@ struct mlx5_ifc_cqc_bits {
 	u8         cqe_comp_en[0x1];
 	u8         mini_cqe_res_format[0x2];
 	u8         st[0x4];
-	u8         reserved_at_18[0x8];
+	u8         reserved_at_18[0x6];
+	u8         cqe_compression_layout[0x2];
 
 	u8         reserved_at_20[0x20];
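[Annotation, not part of the patch; the message is truncated above.] One possible use of the new bits on the RX side: request the enhanced layout at CQ creation when the capability is present, and derive the mini-CQE count of a compression block from its title CQE. The sketch below assumes the surrounding CREATE_CQ plumbing and helper names; only the capability bit, CQC fields, layout enum and get_cqe_enhanced_num_mini_cqes() come from this patch.

/* Sketch only: opt in to the enhanced layout and size a mini-CQE walk.
 * cqc points at the CQ context inside a CREATE_CQ command buffer.
 */
static void set_cqe_compression_layout(struct mlx5_core_dev *mdev, void *cqc)
{
	u8 layout = MLX5_CQE_COMPRESS_LAYOUT_BASIC;

	if (MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
		layout = MLX5_CQE_COMPRESS_LAYOUT_ENHANCED;

	MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	MLX5_SET(cqc, cqc, cqe_compression_layout, layout);
}

/* In the RX poll loop, the title CQE of an enhanced compression block
 * encodes how many mini CQEs follow it.
 */
static u8 rx_compressed_block_size(struct mlx5_cqe64 *title_cqe)
{
	return get_cqe_enhanced_num_mini_cqes(title_cqe);
}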