From patchwork Mon Jul 17 12:55:20 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Andy Shevchenko
X-Patchwork-Id: 13316454
From: Andy Shevchenko
Subject: [PATCH v3 3/4] arm64: smccc: Replace custom COUNT_ARGS() & CONCATENATE() implementations
Date: Mon, 17 Jul 2023 15:55:20 +0300
Message-Id: <20230717125521.43176-4-andriy.shevchenko@linux.intel.com>
In-Reply-To: <20230717125521.43176-1-andriy.shevchenko@linux.intel.com>
References: <20230717125521.43176-1-andriy.shevchenko@linux.intel.com>
To: Andy Shevchenko, Shuah Khan, David Gow, Daniel Latypov,
 "Steven Rostedt (Google)", Bjorn Helgaas, linux-kernel@vger.kernel.org,
 linux-kselftest@vger.kernel.org, kunit-dev@googlegroups.com,
 linux-arm-kernel@lists.infradead.org, linux-pci@vger.kernel.org,
 linux-trace-kernel@vger.kernel.org
Cc: Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen,
 x86@kernel.org, "H. Peter Anvin", Brendan Higgins, Mark Rutland,
 Lorenzo Pieralisi, Sudeep Holla, Masami Hiramatsu, Andrew Morton

Replace the custom implementations of these macros with the generic
ones from args.h.

Signed-off-by: Andy Shevchenko
---
 include/linux/arm-smccc.h | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index f196c19f8e55..2865b14c2bba 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,6 +5,7 @@
 #ifndef __LINUX_ARM_SMCCC_H
 #define __LINUX_ARM_SMCCC_H
 
+#include <linux/args.h>
 #include 
 #include 
 
@@ -413,11 +414,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #endif
 
-#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
-
-#define __count_args(...)                                              \
-        ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
-
 #define __constraint_read_0     "r" (arg0)
 #define __constraint_read_1     __constraint_read_0, "r" (arg1)
 #define __constraint_read_2     __constraint_read_1, "r" (arg2)
@@ -475,14 +471,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
         __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);              \
         register typeof(a7) arg7 asm("r7") = __a7
 
-#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
-#define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
-
-#define ___constraints(count)                                          \
-        : __constraint_read_ ## count                                  \
-        : smccc_sve_clobbers "memory"
-#define __constraints(count)    ___constraints(count)
-
 /*
  * We have an output list that is not necessarily used, and GCC feels
  * entitled to optimise the whole sequence away. "volatile" is what
"volatile" is what @@ -494,11 +482,13 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, register unsigned long r1 asm("r1"); \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3"); \ - __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + CONCATENATE(__declare_arg_, COUNT_ARGS(__VA_ARGS__)); \ asm volatile(SMCCC_SVE_CHECK \ inst "\n" : \ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ - __constraints(__count_args(__VA_ARGS__))); \ + : CONCATENATE(__constraint_read_, \ + COUNT_ARGS(__VA_ARGS__)) \ + : smccc_sve_clobbers "memory"); \ if (___res) \ *___res = (typeof(*___res)){r0, r1, r2, r3}; \ } while (0) @@ -542,8 +532,11 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, */ #define __fail_smccc_1_1(...) \ do { \ - __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm ("" : __constraints(__count_args(__VA_ARGS__))); \ + CONCATENATE(__declare_arg_, COUNT_ARGS(__VA_ARGS__)); \ + asm ("" : \ + : CONCATENATE(__constraint_read_, \ + COUNT_ARGS(__VA_ARGS__)) \ + : smccc_sve_clobbers "memory"); \ if (___res) \ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \ } while (0)