Message ID | 20161005200522.GE19539@ZenIV.linux.org.uk (mailing list archive) |
---|---|
State | Not Applicable, archived |
Headers | show |
On Fri, Oct 07, 2016 at 10:43:18AM -0400, CAI Qian wrote: > Hmm, this round of trinity triggered a different hang. > > [ 2094.487964] [<ffffffff813e27b7>] call_rwsem_down_write_failed+0x17/0x30 > [ 2094.495450] [<ffffffff817d1bff>] down_write+0x5f/0x80 > [ 2094.508284] [<ffffffff8127e301>] chown_common.isra.12+0x131/0x1e0 > [ 2094.553784] 2 locks held by trinity-c0/3126: > [ 2094.558552] #0: (sb_writers#14){.+.+.+}, at: [<ffffffff81284be1>] __sb_start_write+0xd1/0xf0 > [ 2094.568240] #1: (&sb->s_type->i_mutex_key#17){++++++}, at: [<ffffffff8127e301>] chown_common.isra.12+0x131/0x1e0 Waiting on i_mutex. > [ 2094.643597] [<ffffffff817d24b7>] rwsem_down_read_failed+0x107/0x190 > [ 2094.665119] [<ffffffff810f8b0b>] down_read_nested+0x5b/0x80 > [ 2094.691133] [<ffffffff812bdbbd>] vfs_fsync_range+0x3d/0xb0 > [ 2094.721844] 1 lock held by trinity-c1/3127: > [ 2094.726515] #0: (&xfs_nondir_ilock_class){++++..}, at: [<ffffffffa03335fa>] xfs_ilock+0xfa/0x260 [xfs] Waiting on i_ilock. > [ 2094.808078] [<ffffffff817cf4df>] mutex_lock_nested+0x19f/0x450 > [ 2094.820715] [<ffffffff812a5313>] __fdget_pos+0x43/0x50 > [ 2094.826544] [<ffffffff81297f53>] SyS_getdents+0x83/0x140 > [ 2094.856682] #0: (&f->f_pos_lock){+.+.+.}, at: [<ffffffff812a5313>] __fdget_pos+0x43/0x50 concurrent readdir on the same directory fd, blocked on fd. > [ 2094.936885] [<ffffffff817cf4df>] mutex_lock_nested+0x19f/0x450 > [ 2094.956620] [<ffffffff812a5313>] __fdget_pos+0x43/0x50 > [ 2094.962454] [<ffffffff81298091>] SyS_getdents64+0x81/0x130 > [ 2094.988204] 1 lock held by trinity-c3/3129: > [ 2094.992872] #0: (&f->f_pos_lock){+.+.+.}, at: [<ffffffff812a5313>] __fdget_pos+0x43/0x50 Same. 
> [ 2095.073118] [<ffffffff817cf4df>] mutex_lock_nested+0x19f/0x450 > [ 2095.091589] [<ffffffff812811dd>] SyS_lseek+0x1d/0xb0 > [ 2095.097229] [<ffffffff81003c9c>] do_syscall_64+0x6c/0x1e0 > [ 2095.110547] 1 lock held by trinity-c4/3130: > [ 2095.115216] #0: (&f->f_pos_lock){+.+.+.}, at: [<ffffffff812a5313>] __fdget_pos+0x43/0x50 Concurrent lseek on directory fd, blocked on fd. > [ 2095.188230] [<ffffffff817d24b7>] rwsem_down_read_failed+0x107/0x190 > [ 2095.223558] [<ffffffffa03335fa>] xfs_ilock+0xfa/0x260 [xfs] > [ 2095.229894] [<ffffffffa03337d4>] xfs_ilock_attr_map_shared+0x34/0x40 [xfs] > [ 2095.237682] [<ffffffffa02ccfaf>] xfs_attr_get+0xdf/0x1b0 [xfs] > [ 2095.244312] [<ffffffffa0341bfc>] xfs_xattr_get+0x4c/0x70 [xfs] > [ 2095.250924] [<ffffffff812ad269>] generic_getxattr+0x59/0x70 > [ 2095.257244] [<ffffffff812acf9b>] vfs_getxattr+0x8b/0xb0 > [ 2095.263177] [<ffffffffa0435bd6>] ovl_xattr_get+0x46/0x60 [overlay] > [ 2095.270176] [<ffffffffa04331aa>] ovl_other_xattr_get+0x1a/0x20 [overlay] > [ 2095.277756] [<ffffffff812ad269>] generic_getxattr+0x59/0x70 > [ 2095.284079] [<ffffffff81345e9e>] cap_inode_need_killpriv+0x2e/0x40 > [ 2095.291078] [<ffffffff81349a33>] security_inode_need_killpriv+0x33/0x50 > [ 2095.298560] [<ffffffff812a2fb0>] dentry_needs_remove_privs+0x30/0x50 > [ 2095.305743] [<ffffffff8127ea21>] do_truncate+0x51/0xc0 > [ 2095.311581] [<ffffffff81284be1>] ? __sb_start_write+0xd1/0xf0 > [ 2095.318094] [<ffffffff81284be1>] ? 
__sb_start_write+0xd1/0xf0 > [ 2095.324609] [<ffffffff8127edde>] do_sys_ftruncate.constprop.15+0xfe/0x160 > [ 2095.332286] [<ffffffff8127ee7e>] SyS_ftruncate+0xe/0x10 > [ 2095.338225] [<ffffffff81003c9c>] do_syscall_64+0x6c/0x1e0 > [ 2095.344339] [<ffffffff817d4a3f>] entry_SYSCALL64_slow_path+0x25/0x25 > [ 2095.351531] 2 locks held by trinity-c5/3131: > [ 2095.356297] #0: (sb_writers#14){.+.+.+}, at: [<ffffffff81284be1>] __sb_start_write+0xd1/0xf0 > [ 2095.365983] #1: (&xfs_nondir_ilock_class){++++..}, at: [<ffffffffa03335fa>] xfs_ilock+0xfa/0x260 [xfs] truncate on overlay, removing xattrs from XFS file, blocked on i_ilock. > [ 2095.440372] [<ffffffff817d2782>] rwsem_down_write_failed+0x242/0x4b0 > [ 2095.474300] [<ffffffff8127e413>] chmod_common+0x63/0x150 > [ 2095.513452] 2 locks held by trinity-c6/3132: > [ 2095.518217] #0: (sb_writers#14){.+.+.+}, at: [<ffffffff81284be1>] __sb_start_write+0xd1/0xf0 > [ 2095.527895] #1: (&sb->s_type->i_mutex_key#17){++++++}, at: [<ffffffff8127e413>] chmod_common+0x63/0x150 chmod, blocked on i_mutex. > [ 2095.602379] [<ffffffff817d24b7>] rwsem_down_read_failed+0x107/0x190 > [ 2095.616490] [<ffffffff813e2788>] call_rwsem_down_read_failed+0x18/0x30 > [ 2095.623877] [<ffffffff810f8b0b>] down_read_nested+0x5b/0x80 > [ 2095.649889] [<ffffffff812bdbbd>] vfs_fsync_range+0x3d/0xb0 > [ 2095.680610] 1 lock held by trinity-c7/3133: > [ 2095.685281] #0: (&xfs_nondir_ilock_class){++++..}, at: [<ffffffffa03335fa>] xfs_ilock+0xfa/0x260 [xfs] fsync on file, blocked on i_ilock. > [ 2095.759662] [<ffffffff817d24b7>] rwsem_down_read_failed+0x107/0x190 > [ 2095.807155] [<ffffffff812bdbbd>] vfs_fsync_range+0x3d/0xb0 > [ 2095.813377] [<ffffffff812bdc8d>] do_fsync+0x3d/0x70 > [ 2095.818921] [<ffffffff812bdf63>] SyS_fdatasync+0x13/0x20 > [ 2095.838261] 1 lock held by trinity-c8/3135: > [ 2095.842930] #0: (&xfs_nondir_ilock_class){++++..}, at: [<ffffffffa03335fa>] xfs_ilock+0xfa/0x260 [xfs] ditto. 
> [ 2095.917305] [<ffffffff817d24b7>] rwsem_down_read_failed+0x107/0x190 > [ 2095.958968] [<ffffffffa0333790>] xfs_ilock_data_map_shared+0x30/0x40 [xfs] > [ 2095.966752] [<ffffffffa03128c6>] __xfs_get_blocks+0x96/0x9d0 [xfs] > [ 2095.989413] [<ffffffffa0313214>] xfs_get_blocks+0x14/0x20 [xfs] > [ 2095.996122] [<ffffffff812cca44>] do_mpage_readpage+0x474/0x800 > [ 2096.029678] [<ffffffff812ccf0d>] mpage_readpages+0x13d/0x1b0 > [ 2096.050724] [<ffffffffa0311f14>] xfs_vm_readpages+0x54/0x170 [xfs] > [ 2096.057724] [<ffffffff811f1a1d>] __do_page_cache_readahead+0x2ad/0x370 > [ 2096.079787] [<ffffffff811f2014>] force_page_cache_readahead+0x94/0xf0 > [ 2096.087077] [<ffffffff811f2168>] SyS_readahead+0xa8/0xc0 > [ 2096.106427] 1 lock held by trinity-c9/3136: > [ 2096.111097] #0: (&xfs_nondir_ilock_class){++++..}, at: [<ffffffffa03335fa>] xfs_ilock+0xfa/0x260 [xfs] readahead blocking in i_ilock before reading in extents. Nothing here indicates a deadlock. Everything is waiting for locks, but nothing is holding locks in a way that indicates that progress is not being made. This sort of thing can happen when slow storage is massively overloaded - sysrq-w is really the only way to get a better picture of what is happening here, but so far there's no concrete evidence of a hang from this output. Cheers, Dave.
On Fri, Oct 07, 2016 at 02:56:22PM -0400, CAI Qian wrote: > > > ----- Original Message ----- > > From: "CAI Qian" <caiqian@redhat.com> > > To: "Jan Kara" <jack@suse.cz>, "Miklos Szeredi" <miklos@szeredi.hu>, "tj" <tj@kernel.org>, "Al Viro" > > <viro@ZenIV.linux.org.uk>, "Linus Torvalds" <torvalds@linux-foundation.org>, "Dave Chinner" <david@fromorbit.com> > > Cc: "linux-xfs" <linux-xfs@vger.kernel.org>, "Jens Axboe" <axboe@kernel.dk>, "Nick Piggin" <npiggin@gmail.com>, > > linux-fsdevel@vger.kernel.org, "Dave Jones" <davej@codemonkey.org.uk> > > Sent: Friday, October 7, 2016 11:27:55 AM > > Subject: Re: local DoS - systemd hang or timeout (WAS: Re: [RFC][CFT] splice_read reworked) > > > > > > > > > Hmm, this round of trinity triggered a different hang. > > This hang is reproducible so far with the command below on a overlayfs/xfs, > Another data point is that this hang can also be reproduced using device-mapper thinp > as the docker backend. Again, no evidence that the system is actually hung. Waiting on locks, yes, but nothing to indicate there is a deadlock in those waiters. Cheers, Dave.
----- Original Message ----- > From: "Dave Chinner" <david@fromorbit.com> > To: "CAI Qian" <caiqian@redhat.com> > Cc: "Jan Kara" <jack@suse.cz>, "Miklos Szeredi" <miklos@szeredi.hu>, "tj" <tj@kernel.org>, "Al Viro" > <viro@ZenIV.linux.org.uk>, "Linus Torvalds" <torvalds@linux-foundation.org>, "linux-xfs" > <linux-xfs@vger.kernel.org>, "Jens Axboe" <axboe@kernel.dk>, "Nick Piggin" <npiggin@gmail.com>, > linux-fsdevel@vger.kernel.org, "Dave Jones" <davej@codemonkey.org.uk> > Sent: Sunday, October 9, 2016 5:54:55 PM > Subject: Re: local DoS - systemd hang or timeout (WAS: Re: [RFC][CFT] splice_read reworked) > > Again, no evidence that the system is actually hung. Waiting on > locks, yes, but nothing to indicate there is a deadlock in those > waiters. Here you are, http://people.redhat.com/qcai/tmp/dmesg CAI Qian -- To unsubscribe from this list: send the line "unsubscribe linux-xfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
> Here you are, > > http://people.redhat.com/qcai/tmp/dmesg Also, this turned out to be a regression and bisecting so far pointed out this commit, commit 5d50ac70fe98518dbf620bfba8184254663125eb Merge: 31c1feb 4e14e49 Author: Linus Torvalds <torvalds@linux-foundation.org> Date: Wed Nov 11 20:18:48 2015 -0800 Merge tag 'xfs-for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/g Pull xfs updates from Dave Chinner: "There is nothing really major here - the only significant addition is the per-mount operation statistics infrastructure. Otherwises there's various ACL, xattr, DAX, AIO and logging fixes, and a smattering of small cleanups and fixes elsewhere. Summary: - per-mount operational statistics in sysfs - fixes for concurrent aio append write submission - various logging fixes - detection of zeroed logs and invalid log sequence numbers on v5 filesys - memory allocation failure message improvements - a bunch of xattr/ACL fixes - fdatasync optimisation - miscellaneous other fixes and cleanups" * tag 'xfs-for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/d xfs: give all workqueues rescuer threads xfs: fix log recovery op header validation assert xfs: Fix error path in xfs_get_acl xfs: optimise away log forces on timestamp updates for fdatasync xfs: don't leak uuid table on rmmod xfs: invalidate cached acl if set via ioctl xfs: Plug memory leak in xfs_attrmulti_attr_set xfs: Validate the length of on-disk ACLs xfs: invalidate cached acl if set directly via xattr xfs: xfs_filemap_pmd_fault treats read faults as write faults xfs: add ->pfn_mkwrite support for DAX xfs: DAX does not use IO completion callbacks xfs: Don't use unwritten extents for DAX xfs: introduce BMAPI_ZERO for allocating zeroed extents xfs: fix inode size update overflow in xfs_map_direct() xfs: clear PF_NOFREEZE for xfsaild kthread xfs: fix an error code in xfs_fs_fill_super() xfs: stats are no longer dependent on CONFIG_PROC_FS xfs: simplify /proc teardown & error handling xfs: 
per-filesystem stats counter implementation ... -- To unsubscribe from this list: send the line "unsubscribe linux-xfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Mon, Oct 10, 2016 at 10:10:29AM -0400, CAI Qian wrote: > > > ----- Original Message ----- > > From: "Dave Chinner" <david@fromorbit.com> > > To: "CAI Qian" <caiqian@redhat.com> > > Cc: "Jan Kara" <jack@suse.cz>, "Miklos Szeredi" <miklos@szeredi.hu>, "tj" <tj@kernel.org>, "Al Viro" > > <viro@ZenIV.linux.org.uk>, "Linus Torvalds" <torvalds@linux-foundation.org>, "linux-xfs" > > <linux-xfs@vger.kernel.org>, "Jens Axboe" <axboe@kernel.dk>, "Nick Piggin" <npiggin@gmail.com>, > > linux-fsdevel@vger.kernel.org, "Dave Jones" <davej@codemonkey.org.uk> > > Sent: Sunday, October 9, 2016 5:54:55 PM > > Subject: Re: local DoS - systemd hang or timeout (WAS: Re: [RFC][CFT] splice_read reworked) > > > > Again, no evidence that the system is actually hung. Waiting on > > locks, yes, but nothing to indicate there is a deadlock in those > > waiters. > Here you are, > > http://people.redhat.com/qcai/tmp/dmesg It's a page lock order bug in the XFS seek hole/data implementation. -Dave.
----- Original Message ----- > From: "Dave Chinner" <david@fromorbit.com> > Sent: Monday, October 10, 2016 5:57:14 PM > > > http://people.redhat.com/qcai/tmp/dmesg > > It's a page lock order bug in the XFS seek hole/data implementation. So reverted this commit against the latest mainline allows trinity run hours. Otherwise, it always hang at fdatasync() within 30 minutes. fc0561cefc04e7803c0f6501ca4f310a502f65b8 xfs: optimise away log forces on timestamp updates for fdatasync PS: tested against the vfs tree's #work.splice_read with this commit reverted is now hanging at sync() instead which won't be reproduced against the mainline so far. http://people.redhat.com/qcai/tmp/dmesg-sync CAI Qian -- To unsubscribe from this list: send the line "unsubscribe linux-xfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Wed, Oct 12, 2016 at 03:50:36PM -0400, CAI Qian wrote: > > > ----- Original Message ----- > > From: "Dave Chinner" <david@fromorbit.com> > > Sent: Monday, October 10, 2016 5:57:14 PM > > > > > http://people.redhat.com/qcai/tmp/dmesg > > > > It's a page lock order bug in the XFS seek hole/data implementation. > So reverted this commit against the latest mainline allows trinity run > hours. Otherwise, it always hang at fdatasync() within 30 minutes. > > fc0561cefc04e7803c0f6501ca4f310a502f65b8 > xfs: optimise away log forces on timestamp updates for fdatasync Has nothing at all to do with the hang. > PS: tested against the vfs tree's #work.splice_read with this commit > reverted is now hanging at sync() instead which won't be reproduced > against the mainline so far. > http://people.redhat.com/qcai/tmp/dmesg-sync It is the same page lock vs seek hole/data issue. Cheers, Dave.
----- Original Message ----- > From: "Dave Chinner" <david@fromorbit.com> > Sent: Wednesday, October 12, 2016 4:59:01 PM > Subject: Re: [bisected] Re: local DoS - systemd hang or timeout (WAS: Re: [RFC][CFT] splice_read reworked) > > On Wed, Oct 12, 2016 at 03:50:36PM -0400, CAI Qian wrote: > > > > > > ----- Original Message ----- > > > From: "Dave Chinner" <david@fromorbit.com> > > > Sent: Monday, October 10, 2016 5:57:14 PM > > > > > > > http://people.redhat.com/qcai/tmp/dmesg > > > > > > It's a page lock order bug in the XFS seek hole/data implementation. > > So reverted this commit against the latest mainline allows trinity run > > hours. Otherwise, it always hang at fdatasync() within 30 minutes. > > > > fc0561cefc04e7803c0f6501ca4f310a502f65b8 > > xfs: optimise away log forces on timestamp updates for fdatasync > > Has nothing at all to do with the hang. > > > PS: tested against the vfs tree's #work.splice_read with this commit > > reverted is now hanging at sync() instead which won't be reproduced > > against the mainline so far. > > http://people.redhat.com/qcai/tmp/dmesg-sync > > It is the same page lock vs seek hole/data issue. FYI, CVE-2016-8660 was assigned for it. CAI Qian -- To unsubscribe from this list: send the line "unsubscribe linux-xfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Thu, Oct 13, 2016 at 12:25:30PM -0400, CAI Qian wrote: > > > ----- Original Message ----- > > From: "Dave Chinner" <david@fromorbit.com> > > Sent: Wednesday, October 12, 2016 4:59:01 PM > > Subject: Re: [bisected] Re: local DoS - systemd hang or timeout (WAS: Re: [RFC][CFT] splice_read reworked) > > > > On Wed, Oct 12, 2016 at 03:50:36PM -0400, CAI Qian wrote: > > > > > > > > > ----- Original Message ----- > > > > From: "Dave Chinner" <david@fromorbit.com> > > > > Sent: Monday, October 10, 2016 5:57:14 PM > > > > > > > > > http://people.redhat.com/qcai/tmp/dmesg > > > > > > > > It's a page lock order bug in the XFS seek hole/data implementation. > > > So reverted this commit against the latest mainline allows trinity run > > > hours. Otherwise, it always hang at fdatasync() within 30 minutes. > > > > > > fc0561cefc04e7803c0f6501ca4f310a502f65b8 > > > xfs: optimise away log forces on timestamp updates for fdatasync > > > > Has nothing at all to do with the hang. > > > > > PS: tested against the vfs tree's #work.splice_read with this commit > > > reverted is now hanging at sync() instead which won't be reproduced > > > against the mainline so far. > > > http://people.redhat.com/qcai/tmp/dmesg-sync > > > > It is the same page lock vs seek hole/data issue. > FYI, CVE-2016-8660 was assigned for it. Why? This isn't a security issue - CVEs cost time and effort for everyone to track and follow and raising them for issues like this does not help anyone fix the actual problem. It doesn't help us track it, analyse it, communicate with the bug reporter, test it or get the fix committed. It's meaningless to the developers fixing the code, it's meaningless to users, and it's meaningless to most distros that are supporting XFS because the distro maintainers don't watch the CVE lists for XFS bugs they need to backport and fix. All this does is artificially inflate the supposed importance of the bug. CVEs are for security or severe issues. 
This is neither serious nor a security issue - please have the common courtesy to ask the people with the knowledge to make such a determination (i.e. the maintainers) before you waste the time of a /large number/ of people by raising a useless CVE... Yes, you found a bug. No, it's not a security bug. No, you should not be abusing the CVE process to apply pressure to get it fixed. Please don't do this again. Cheers, Dave.
----- Original Message ----- > From: "Dave Chinner" <david@fromorbit.com> > Sent: Thursday, October 13, 2016 4:49:17 PM > Subject: Re: [bisected] Re: local DoS - systemd hang or timeout (WAS: Re: [RFC][CFT] splice_read reworked) > > Why? This isn't a security issue - CVEs cost time and effort for > everyone to track and follow and raising them for issues like this > does not help anyone fix the actual problem. It doesn't help us > track it, analyse it, communicate with the bug reporter, test it or > get the fix committed. It's meaningless to the developers fixing > the code, it's meaningless to users, and it's meaningless to most > distros that are supporting XFS because the distro maintainers don't > watch the CVE lists for XFS bugs they need to backport and fix. > > All this does is artificially inflate the supposed importance of the > bug. CVEs are for security or severe issues. This is neither serious > or a security issue - please have the common courtesy to ask the > people with the knowledge to make such a determination (i.e. the > maintainers) before you waste the time of a /large number/ of people > by raising a useless CVE... > > Yes, you found a bug. No, it's not a security bug. No, you should > not abusing of the CVE process to apply pressure to get it fixed. > Please don't do this again. As far as I can tell, this is a medium-severity security issue that a non-privileged user can exploit to cause a system hang/deadlock. Hence, a local DoS for other users using the system. CAI Qian -- To unsubscribe from this list: send the line "unsubscribe linux-xfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/fs/proc/base.c b/fs/proc/base.c index d588d14..489d2d6 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -400,23 +400,6 @@ static const struct file_operations proc_pid_cmdline_ops = { .llseek = generic_file_llseek, }; -static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task) -{ - struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); - if (mm && !IS_ERR(mm)) { - unsigned int nwords = 0; - do { - nwords += 2; - } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ - seq_write(m, mm->saved_auxv, nwords * sizeof(mm->saved_auxv[0])); - mmput(mm); - return 0; - } else - return PTR_ERR(mm); -} - - #ifdef CONFIG_KALLSYMS /* * Provides a wchan file via kallsyms in a proper one-value-per-file format. @@ -1014,6 +997,30 @@ static const struct file_operations proc_environ_operations = { .release = mem_release, }; +static int auxv_open(struct inode *inode, struct file *file) +{ + return __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS); +} + +static ssize_t auxv_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct mm_struct *mm = file->private_data; + unsigned int nwords = 0; + do { + nwords += 2; + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ + return simple_read_from_buffer(buf, count, ppos, mm->saved_auxv, + nwords * sizeof(mm->saved_auxv[0])); +} + +static const struct file_operations proc_auxv_operations = { + .open = auxv_open, + .read = auxv_read, + .llseek = generic_file_llseek, + .release = mem_release, +}; + static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -2822,7 +2829,7 @@ static const struct pid_entry tgid_base_stuff[] = { DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), #endif REG("environ", S_IRUSR, proc_environ_operations), - ONE("auxv", S_IRUSR, proc_pid_auxv), + REG("auxv", S_IRUSR, proc_auxv_operations), ONE("status", S_IRUGO, proc_pid_status), 
ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits), @@ -3210,7 +3217,7 @@ static const struct pid_entry tid_base_stuff[] = { DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), #endif REG("environ", S_IRUSR, proc_environ_operations), - ONE("auxv", S_IRUSR, proc_pid_auxv), + REG("auxv", S_IRUSR, proc_auxv_operations), ONE("status", S_IRUGO, proc_pid_status), ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits),