From patchwork Fri Jul 10 13:48:50 2015
X-Patchwork-Submitter: Daniel Thompson
X-Patchwork-Id: 6766181
From: Daniel Thompson
To: Catalin Marinas, Will Deacon
Cc: Daniel Thompson, linaro-kernel@lists.linaro.org, patches@linaro.org,
    Marc Zyngier, Andre Przywara, linux-kernel@vger.kernel.org,
    John Stultz, Christoffer Dall, Sumit Semwal,
    linux-arm-kernel@lists.infradead.org
Subject: [PATCH] arm64: alternative: Provide if/else/endif assembler macros
Date: Fri, 10 Jul 2015 14:48:50 +0100
Message-Id: <1436536130-31438-1-git-send-email-daniel.thompson@linaro.org>

The existing alternative_insn macro has some limitations that make it
hard to work with. In particular, the fact that it takes its
instructions from its own macro arguments means it does not play nicely
with C pre-processor macros, because the macro arguments look like a
string to the C pre-processor. Workarounds are (probably) possible but
things start to look ugly.

Introduce an alternative set of macros that allows instructions to be
presented to the assembler as normal, and switch everything over to the
new macros.

Signed-off-by: Daniel Thompson
---

Notes:
    To be honest, these if-not/else/endif macros are simply more
    readable than the original macro, and that might be enough to
    justify them on their own. However, below is an example that is
    needlessly hard to write without them, because ICC_PMR_EL1 is a C
    pre-processor macro:

        .macro disable_irq, tmp
        mov     \tmp, #ICC_PMR_EL1_MASKED
        alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
        msr     daifset, #2
        alternative_else
        msr_s   ICC_PMR_EL1, \tmp
        alternative_endif
        .endm

    The new macros have received a fair degree of testing because I
    have based my (not published since March) pseudo-NMI patch set on
    them.
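[Annotation, not part of the original posting: for contrast with the
disable_irq helper above, a sketch of roughly how the same helper would
have to be spelled with the existing alternative_insn macro. The exact
form below is hypothetical, but it shows the limitation the cover text
describes: ICC_PMR_EL1 sits inside a quoted macro argument, so the C
pre-processor never expands it and the sequence fails to assemble
without further workarounds.]

        // Hypothetical illustration only -- this does not assemble,
        // because ICC_PMR_EL1 is a C pre-processor macro and is never
        // expanded inside the quoted macro argument.
        .macro disable_irq, tmp
        mov     \tmp, #ICC_PMR_EL1_MASKED
        alternative_insn "msr daifset, #2", "msr_s ICC_PMR_EL1, \tmp", ARM64_HAS_SYSREG_GIC_CPUIF
        .endm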
 arch/arm64/include/asm/alternative.h | 18 ++++++++++++------
 arch/arm64/kernel/entry.S            | 29 +++++++++++++----------------
 arch/arm64/kvm/hyp.S                 | 12 ++++++++++--
 arch/arm64/mm/cache.S                |  7 ++++++-
 4 files changed, 41 insertions(+), 25 deletions(-)

--
2.4.3

diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..8c8cdfac7251 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -65,13 +65,19 @@ void free_alternatives_memory(void);
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
-661:	\insn1
-662:	.pushsection .altinstructions, "a"
-	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+.macro alternative_if_not cap
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
 	.popsection
-	.pushsection .altinstr_replacement, "ax"
-663:	\insn2
+661:
+.endm
+
+.macro alternative_else
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+.endm
+
+.macro alternative_endif
 664:	.popsection
 	.org	. - (664b-663b) + (662b-661b)
 	.org	. - (662b-661b) + (664b-663b)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..be8a70d4028c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -122,26 +122,23 @@
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG	"nop ; nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
+	nop
+#endif
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
 #else
-
-#define SEQUENCE_ORG	"nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	msr	contextidr_el1, xzr
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb14f428..10915aaf0b01 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c2dd..eb48d5df4a0f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
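[Annotation, not part of the original posting: a sketch of what a
paired use of the new macros expands to, using the cache.S hunk above
as the example. The directives and labels come straight from the new
alternative.h; the comments are mine. Each .org directive would move
the location counter backwards if the two sequences differed in size,
so assembly fails unless the default and replacement sequences are
exactly the same length.]

        .pushsection .altinstructions, "a"
        altinstruction_entry 661f, 663f, ARM64_WORKAROUND_CLEAN_CACHE, 662f-661f, 664f-663f
        .popsection                     // record this site for boot-time patching
661:    dc      cvac, x0                // default instruction stream
662:    .pushsection .altinstr_replacement, "ax"
663:    dc      civac, x0               // replacement patched in when the
                                        // erratum capability is detected
664:    .popsection
        .org    . - (664b-663b) + (662b-661b)   // errors if replacement longer
        .org    . - (662b-661b) + (664b-663b)   // errors if original longer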