From patchwork Wed Dec 11 10:37:08 2024
X-Patchwork-Submitter: Alice Ryhl
X-Patchwork-Id: 13903325
Date: Wed, 11 Dec 2024 10:37:08 +0000
From: Alice Ryhl
To: Miguel Ojeda, Matthew Wilcox, Lorenzo Stoakes, Vlastimil Babka,
 John Hubbard, "Liam R. Howlett", Andrew Morton, Greg Kroah-Hartman,
 Arnd Bergmann, Christian Brauner, Jann Horn, Suren Baghdasaryan
Cc: Alex Gaynor, Boqun Feng, Gary Guo, Björn Roy Baron, Benno Lossin,
 Andreas Hindborg, Trevor Gross, linux-kernel@vger.kernel.org,
 linux-mm@kvack.org, rust-for-linux@vger.kernel.org, Alice Ryhl
Subject: [PATCH v11 4/8] mm: rust: add lock_vma_under_rcu
Message-ID: <20241211-vma-v11-4-466640428fc3@google.com>
In-Reply-To: <20241211-vma-v11-0-466640428fc3@google.com>
References: <20241211-vma-v11-0-466640428fc3@google.com>
X-Mailer: b4 0.13.0

Currently, the binder driver always uses the mmap lock to make changes
to its vma. Because the mmap lock is global to the process, this can
involve significant contention. However, the kernel has a feature
called per-vma locks, which can significantly reduce contention. For
example, you can take a vma lock in parallel with an mmap write lock.
This is important because contention on the mmap lock has been a
long-term recurring challenge for the Binder driver.

This patch introduces support for using `lock_vma_under_rcu` from Rust.
The Rust Binder driver will be able to use this to reduce contention on
the mmap lock.
Acked-by: Lorenzo Stoakes (for mm bits)
Reviewed-by: Jann Horn
Signed-off-by: Alice Ryhl
---
 rust/helpers/mm.c |  5 +++++
 rust/kernel/mm.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c
index 7b72eb065a3e..81b510c96fd2 100644
--- a/rust/helpers/mm.c
+++ b/rust/helpers/mm.c
@@ -43,3 +43,8 @@ struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
 {
 	return vma_lookup(mm, addr);
 }
+
+void rust_helper_vma_end_read(struct vm_area_struct *vma)
+{
+	vma_end_read(vma);
+}
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
index ace8e7d57afe..425b73a9dfe6 100644
--- a/rust/kernel/mm.rs
+++ b/rust/kernel/mm.rs
@@ -13,6 +13,7 @@
 use core::{ops::Deref, ptr::NonNull};
 
 pub mod virt;
+use virt::VmAreaRef;
 
 /// A wrapper for the kernel's `struct mm_struct`.
 ///
@@ -170,6 +171,32 @@ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
         unsafe { &*ptr.cast() }
     }
 
+    /// Attempt to access a vma using the vma read lock.
+    ///
+    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
+    /// case, you should fall back to taking the mmap read lock.
+    ///
+    /// When per-vma locks are disabled, this always returns `None`.
+    #[inline]
+    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
+        #[cfg(CONFIG_PER_VMA_LOCK)]
+        {
+            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
+            // `mm_users` is non-zero.
+            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) };
+            if !vma.is_null() {
+                return Some(VmaReadGuard {
+                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
+                    // valid vma. The vma is stable for as long as the vma read lock is held.
+                    vma: unsafe { VmAreaRef::from_raw(vma) },
+                    _nts: NotThreadSafe,
+                });
+            }
+        }
+
+        None
+    }
+
     /// Lock the mmap read lock.
     #[inline]
     pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
@@ -238,3 +265,32 @@ fn drop(&mut self) {
         unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
     }
 }
+
+/// A guard for the vma read lock.
+///
+/// # Invariants
+///
+/// This `VmaReadGuard` guard owns the vma read lock.
+pub struct VmaReadGuard<'a> {
+    vma: &'a VmAreaRef,
+    // `vma_end_read` must be called on the same thread as where the lock was taken
+    _nts: NotThreadSafe,
+}
+
+// Make all `VmAreaRef` methods available on `VmaReadGuard`.
+impl Deref for VmaReadGuard<'_> {
+    type Target = VmAreaRef;
+
+    #[inline]
+    fn deref(&self) -> &VmAreaRef {
+        self.vma
+    }
+}
+
+impl Drop for VmaReadGuard<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: We hold the read lock by the type invariants.
+        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
+    }
+}
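
A note on the `_nts: NotThreadSafe` field above: to my recollection the
marker in `kernel::types` is essentially a `PhantomData<*mut ()>` (treat
the exact definition in this sketch as an assumption). Because raw
pointers are `!Send`, embedding the marker makes `VmaReadGuard` `!Send`,
which is what guarantees that `vma_end_read()` runs on the thread that
took the lock. A standalone illustration of the pattern:

    use core::marker::PhantomData;

    // Stand-in for `kernel::types::NotThreadSafe` (assumed definition).
    type NotThreadSafe = PhantomData<*mut ()>;
    #[allow(non_upper_case_globals)]
    const NotThreadSafe: NotThreadSafe = PhantomData;

    struct Guard<'a> {
        data: &'a str,       // stand-in for `&'a VmAreaRef`
        _nts: NotThreadSafe, // `*mut ()` is !Send, so the guard is !Send
    }

    impl Drop for Guard<'_> {
        fn drop(&mut self) {
            // In `VmaReadGuard` this is where `vma_end_read()` runs; the
            // !Send bound keeps that call on the thread that took the lock.
        }
    }

    fn demo() {
        let g = Guard { data: "vma", _nts: NotThreadSafe };
        let _ = g.data;
        // std::thread::spawn(move || drop(g)); // would not compile: `g` is !Send
    } // the "lock" is released here when `g` drops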