[4/4] fs/binfmt_elf.c: Cosmetic

Message ID 20201121135736.295705-5-alx.manpages@gmail.com (mailing list archive)
State New, archived
Series: Cosmetic

Commit Message

Alejandro Colomar Nov. 21, 2020, 1:57 p.m. UTC
Non-trivial changes:

Invert 'if' conditions to simplify the logic.
Use 'goto' together with the inverted conditions, where appropriate.
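For illustration, a minimal sketch of the kind of transformation described
above (hypothetical function, variable, and label names; not code from this
patch):

/* Before: the special case sits in an 'else if' arm, one level deeper. */
static int pick_bias_nested(int type, int fixed)
{
        int bias = 0;

        if (fixed) {
                bias |= 1;
        } else if (type == 2) {
                /* long special-case block, indented an extra level */
                bias = 42;
        }

        return bias;
}

/* After: invert the checks and jump past the special case early. */
static int pick_bias_flat(int type, int fixed)
{
        int bias = 0;

        if (fixed) {
                bias |= 1;
                goto proceed;   /* hypothetical label name */
        }
        if (type != 2)
                goto proceed;

        /* special-case block, now at the top indentation level */
        bias = 42;

proceed:
        return bias;
}

The early 'goto' removes one level of nesting from the large special-case
block; behaviour is unchanged as long as the inverted conditions are exact
negations of the originals.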

Signed-off-by: Alejandro Colomar <alx.manpages@gmail.com>
---
 fs/binfmt_elf.c | 115 +++++++++++++++++++++++++-----------------------
 1 file changed, 59 insertions(+), 56 deletions(-)

Patch

diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b5e1e0a0917a..dbd50b5bf238 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1079,65 +1079,68 @@  static int load_elf_binary(struct linux_binprm *bprm)
 		 */
 		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
 			elf_flags |= MAP_FIXED;
-		} else if (elf_ex->e_type == ET_DYN) {
-			/*
-			 * This logic is run once for the first LOAD Program
-			 * Header for ET_DYN binaries to calculate the
-			 * randomization (load_bias) for all the LOAD
-			 * Program Headers, and to calculate the entire
-			 * size of the ELF mapping (total_size). (Note that
-			 * load_addr_set is set to true later once the
-			 * initial mapping is performed.)
-			 *
-			 * There are effectively two types of ET_DYN
-			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
-			 * and loaders (ET_DYN without INTERP, since they
-			 * _are_ the ELF interpreter). The loaders must
-			 * be loaded away from programs since the program
-			 * may otherwise collide with the loader (especially
-			 * for ET_EXEC which does not have a randomized
-			 * position). For example to handle invocations of
-			 * "./ld.so someprog" to test out a new version of
-			 * the loader, the subsequent program that the
-			 * loader loads must avoid the loader itself, so
-			 * they cannot share the same load range. Sufficient
-			 * room for the brk must be allocated with the
-			 * loader as well, since brk must be available with
-			 * the loader.
-			 *
-			 * Therefore, programs are loaded offset from
-			 * ELF_ET_DYN_BASE and loaders are loaded into the
-			 * independently randomized mmap region (0 load_bias
-			 * without MAP_FIXED).
-			 */
-			if (interpreter) {
-				load_bias = ELF_ET_DYN_BASE;
-				if (current->flags & PF_RANDOMIZE)
-					load_bias += arch_mmap_rnd();
-				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
-				if (alignment)
-					load_bias &= ~(alignment - 1);
-				elf_flags |= MAP_FIXED;
-			} else
-				load_bias = 0;
+			goto proceed_normally;
+		}
+		if (elf_ex->e_type != ET_DYN)
+			goto proceed_normally;
+		/*
+		 * This logic is run once for the first LOAD Program
+		 * Header for ET_DYN binaries to calculate the
+		 * randomization (load_bias) for all the LOAD
+		 * Program Headers, and to calculate the entire
+		 * size of the ELF mapping (total_size). (Note that
+		 * load_addr_set is set to true later once the
+		 * initial mapping is performed.)
+		 *
+		 * There are effectively two types of ET_DYN
+		 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+		 * and loaders (ET_DYN without INTERP, since they
+		 * _are_ the ELF interpreter). The loaders must
+		 * be loaded away from programs since the program
+		 * may otherwise collide with the loader (especially
+		 * for ET_EXEC which does not have a randomized
+		 * position). For example to handle invocations of
+		 * "./ld.so someprog" to test out a new version of
+		 * the loader, the subsequent program that the
+		 * loader loads must avoid the loader itself, so
+		 * they cannot share the same load range. Sufficient
+		 * room for the brk must be allocated with the
+		 * loader as well, since brk must be available with
+		 * the loader.
+		 *
+		 * Therefore, programs are loaded offset from
+		 * ELF_ET_DYN_BASE and loaders are loaded into the
+		 * independently randomized mmap region (0 load_bias
+		 * without MAP_FIXED).
+		 */
+		if (interpreter) {
+			load_bias = ELF_ET_DYN_BASE;
+			if (current->flags & PF_RANDOMIZE)
+				load_bias += arch_mmap_rnd();
+			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+			if (alignment)
+				load_bias &= ~(alignment - 1);
+			elf_flags |= MAP_FIXED;
+		} else {
+			load_bias = 0;
+		}
 
-			/*
-			 * Since load_bias is used for all subsequent loading
-			 * calculations, we must lower it by the first vaddr
-			 * so that the remaining calculations based on the
-			 * ELF vaddrs will be correctly offset. The result
-			 * is then page aligned.
-			 */
-			load_bias = ELF_PAGESTART(load_bias - vaddr);
+		/*
+		 * Since load_bias is used for all subsequent loading
+		 * calculations, we must lower it by the first vaddr
+		 * so that the remaining calculations based on the
+		 * ELF vaddrs will be correctly offset. The result
+		 * is then page aligned.
+		 */
+		load_bias = ELF_PAGESTART(load_bias - vaddr);
 
-			total_size = total_mapping_size(elf_phdata,
-							elf_ex->e_phnum);
-			if (!total_size) {
-				retval = -EINVAL;
-				goto out_free_dentry;
-			}
+		total_size = total_mapping_size(elf_phdata,
+						elf_ex->e_phnum);
+		if (!total_size) {
+			retval = -EINVAL;
+			goto out_free_dentry;
 		}
-
+proceed_normally:	/* FIXME: a better label name? */
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
 				elf_prot, elf_flags, total_size);
 		if (BAD_ADDR(error)) {