From patchwork Thu Sep 30 14:04:34 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Xin, Xiaohui"
X-Patchwork-Id: 220142
From: xiaohui.xin@intel.com
To: netdev@vger.kernel.org, kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	mst@redhat.com, mingo@elte.hu, davem@davemloft.net,
	herbert@gondor.hengli.com.au, jdike@linux.intel.com
Cc: Xin Xiaohui
Subject: [PATCH v12 17/17] add two new ioctls for mp device.
Date: Thu, 30 Sep 2010 22:04:34 +0800
Message-Id: <1c5d7b4e649b8542f6f91b6d289622f3a7393728.1285853725.git.xiaohui.xin@intel.com>
X-Mailer: git-send-email 1.7.3
In-Reply-To: <1285855474-12110-1-git-send-email-xiaohui.xin@intel.com>
References: <1285855474-12110-1-git-send-email-xiaohui.xin@intel.com>
In-Reply-To: <59d8a50047ee01e26658fd676d26c0162b79e5fd.1285853725.git.xiaohui.xin@intel.com>
References: <59d8a50047ee01e26658fd676d26c0162b79e5fd.1285853725.git.xiaohui.xin@intel.com>
Sender: kvm-owner@vger.kernel.org
X-Mailing-List: kvm@vger.kernel.org

diff --git a/drivers/vhost/mpassthru.c b/drivers/vhost/mpassthru.c
index 1a114d1..41aa59e 100644
--- a/drivers/vhost/mpassthru.c
+++ b/drivers/vhost/mpassthru.c
@@ -54,6 +54,8 @@
 #define COPY_THRESHOLD (L1_CACHE_BYTES * 4)
 #define COPY_HDR_LEN	(L1_CACHE_BYTES < 64 ? 64 : L1_CACHE_BYTES)
 
+#define DEFAULT_NEED	((8192*2*2)*4096)
+
 struct frag {
 	u16	offset;
 	u16	size;
@@ -102,8 +104,10 @@ struct page_pool {
 	spinlock_t	read_lock;
 	/* record the orignal rlimit */
 	struct rlimit	o_rlim;
-	/* record the locked pages */
-	int		lock_pages;
+	/* pages userspace wants locked */
+	int		locked_pages;
+	/* currently locked pages */
+	int		cur_pages;
 	/* the device according to */
 	struct net_device	*dev;
 	/* the mp_port according to dev */
@@ -117,6 +121,7 @@ struct mp_struct {
 	struct net_device	*dev;
 	struct page_pool	*pool;
 	struct socket		socket;
+	struct task_struct	*user;
 };
 
 struct mp_file {
@@ -207,8 +212,8 @@ static int page_pool_attach(struct mp_struct *mp)
 	pool->port.ctor = page_ctor;
 	pool->port.sock = &mp->socket;
 	pool->port.hash = mp_lookup;
-	pool->lock_pages = 0;
-
+	pool->locked_pages = 0;
+	pool->cur_pages = 0;
 	/* locked by mp_mutex */
 	dev->mp_port = &pool->port;
 	mp->pool = pool;
@@ -236,37 +241,6 @@ struct page_info *info_dequeue(struct page_pool *pool)
 	return info;
 }
 
-static int set_memlock_rlimit(struct page_pool *pool, int resource,
-			      unsigned long cur, unsigned long max)
-{
-	struct rlimit new_rlim, *old_rlim;
-	int retval;
-
-	if (resource != RLIMIT_MEMLOCK)
-		return -EINVAL;
-	new_rlim.rlim_cur = cur;
-	new_rlim.rlim_max = max;
-
-	old_rlim = current->signal->rlim + resource;
-
-	/* remember the old rlimit value when backend enabled */
-	pool->o_rlim.rlim_cur = old_rlim->rlim_cur;
-	pool->o_rlim.rlim_max = old_rlim->rlim_max;
-
-	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
-	    !capable(CAP_SYS_RESOURCE))
-		return -EPERM;
-
-	retval = security_task_setrlimit(resource, &new_rlim);
-	if (retval)
-		return retval;
-
-	task_lock(current->group_leader);
-	*old_rlim = new_rlim;
-	task_unlock(current->group_leader);
-	return 0;
-}
-
 static void mp_ki_dtor(struct kiocb *iocb)
 {
 	struct page_info *info = (struct page_info *)(iocb->private);
@@ -286,7 +260,7 @@ static void mp_ki_dtor(struct kiocb *iocb)
 		}
 	}
 	/* Decrement the number of locked pages */
-	info->pool->lock_pages -= info->pnum;
+	info->pool->cur_pages -= info->pnum;
 	kmem_cache_free(ext_page_info_cache, info);
 
 	return;
@@ -319,6 +293,7 @@ static int page_pool_detach(struct mp_struct *mp)
 {
 	struct page_pool *pool;
 	struct page_info *info;
+	struct task_struct *tsk = mp->user;
 	int i;
 
 	/* locked by mp_mutex */
@@ -334,9 +309,9 @@ static int page_pool_detach(struct mp_struct *mp)
 		kmem_cache_free(ext_page_info_cache, info);
 	}
 
-	set_memlock_rlimit(pool, RLIMIT_MEMLOCK,
-			   pool->o_rlim.rlim_cur,
-			   pool->o_rlim.rlim_max);
+	down_write(&tsk->mm->mmap_sem);
+	tsk->mm->locked_vm -= pool->locked_pages;
+	up_write(&tsk->mm->mmap_sem);
 
 	/* locked by mp_mutex */
 	pool->dev->mp_port = NULL;
@@ -534,14 +509,11 @@ static struct page_info *alloc_page_info(struct page_pool *pool,
 	int rc;
 	int i, j, n = 0;
 	int len;
-	unsigned long base, lock_limit;
+	unsigned long base;
 	struct page_info *info = NULL;
 
-	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-	lock_limit >>= PAGE_SHIFT;
-
-	if (pool->lock_pages + count > lock_limit && npages) {
-		printk(KERN_INFO "exceed the locked memory rlimit.");
+	if (pool->cur_pages + count > pool->locked_pages) {
+		printk(KERN_INFO "Exceed memory lock rlimit.");
 		return NULL;
 	}
 
@@ -603,7 +575,7 @@ static struct page_info *alloc_page_info(struct page_pool *pool,
 		mp_hash_insert(pool, info->pages[i], info);
 	}
 	/* increment the number of locked pages */
-	pool->lock_pages += j;
+	pool->cur_pages += j;
 	return info;
 
 failed:
@@ -890,7 +862,7 @@ copy:
 			info->pages[i] = NULL;
 		}
 	}
-	if (!pool->lock_pages)
+	if (!pool->cur_pages)
 		sock->sk->sk_state_change(sock->sk);
 
 	if (info != NULL) {
@@ -974,12 +946,6 @@ proceed:
 		count--;
 	}
 
-	if (!pool->lock_pages) {
-		set_memlock_rlimit(pool, RLIMIT_MEMLOCK,
-				   iocb->ki_user_data * 4096 * 2,
-				   iocb->ki_user_data * 4096 * 2);
-	}
-
 	/* Translate address to kernel */
 	info = alloc_page_info(pool, iocb, iov, count, frags, npages, 0);
 	if (!info)
@@ -1081,8 +1047,10 @@ static long mp_chr_ioctl(struct file *file, unsigned int cmd,
 	struct mp_struct *mp;
 	struct net_device *dev;
 	void __user* argp = (void __user *)arg;
+	unsigned long __user *limitp = argp;
 	struct ifreq ifr;
 	struct sock *sk;
+	unsigned long limit, locked, lock_limit;
 	int ret;
 
 	ret = -EINVAL;
@@ -1122,6 +1090,7 @@ static long mp_chr_ioctl(struct file *file, unsigned int cmd,
 			goto err_dev_put;
 		}
 		mp->dev = dev;
+		mp->user = current;
 
 		ret = -ENOMEM;
 		sk = sk_alloc(mfile->net, AF_UNSPEC, GFP_KERNEL, &mp_proto);
@@ -1166,6 +1135,40 @@ err_dev_put:
 		rtnl_unlock();
 		break;
 
+	case MPASSTHRU_SET_MEM_LOCKED:
+		ret = copy_from_user(&limit, limitp, sizeof limit);
+		if (ret < 0)
+			return ret;
+
+		mp = mp_get(mfile);
+		if (!mp)
+			return -ENODEV;
+
+		limit = PAGE_ALIGN(limit) >> PAGE_SHIFT;
+		down_write(&current->mm->mmap_sem);
+		locked = limit + current->mm->locked_vm;
+		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+		if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+			up_write(&current->mm->mmap_sem);
+			mp_put(mfile);
+			return -ENOMEM;
+		}
+		current->mm->locked_vm = locked;
+		up_write(&current->mm->mmap_sem);
+
+		mutex_lock(&mp_mutex);
+		mp->pool->locked_pages = limit;
+		mutex_unlock(&mp_mutex);
+
+		mp_put(mfile);
+		return 0;
+
+	case MPASSTHRU_GET_MEM_LOCKED_NEED:
+		limit = DEFAULT_NEED;
+		return copy_to_user(limitp, &limit, sizeof limit);
+
 	default:
 		break;
 	}
diff --git a/include/linux/mpassthru.h b/include/linux/mpassthru.h
index c0973b6..efd12ec 100644
--- a/include/linux/mpassthru.h
+++ b/include/linux/mpassthru.h
@@ -8,6 +8,8 @@
 /* ioctl defines */
 #define MPASSTHRU_BINDDEV	_IOW('M', 213, int)
 #define MPASSTHRU_UNBINDDEV	_IO('M', 214)
+#define MPASSTHRU_SET_MEM_LOCKED	_IOW('M', 215, unsigned long)
+#define MPASSTHRU_GET_MEM_LOCKED_NEED	_IOR('M', 216, unsigned long)
 
 #ifdef __KERNEL__
 #if defined(CONFIG_MEDIATE_PASSTHRU) || defined(CONFIG_MEDIATE_PASSTHRU_MODULE)
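
For reference, a minimal userspace sketch of how a backend might drive the two new ioctls: it first asks the driver how many bytes it expects to have accounted as locked memory (MPASSTHRU_GET_MEM_LOCKED_NEED, which reports DEFAULT_NEED), then passes that figure back through MPASSTHRU_SET_MEM_LOCKED so the kernel can charge it against the caller's locked_vm up front. This is not part of the patch: the /dev/mpassthru node name and the error handling are assumptions, and MPASSTHRU_SET_MEM_LOCKED will return -ENODEV unless the fd has already been bound to a NIC with MPASSTHRU_BINDDEV.

/*
 * Illustrative sketch only (not part of the patch). Assumes the mp device
 * node is /dev/mpassthru and that the fd has already been bound to a
 * device with MPASSTHRU_BINDDEV.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Mirrors the definitions added to include/linux/mpassthru.h above. */
#ifndef MPASSTHRU_SET_MEM_LOCKED
#define MPASSTHRU_SET_MEM_LOCKED	_IOW('M', 215, unsigned long)
#define MPASSTHRU_GET_MEM_LOCKED_NEED	_IOR('M', 216, unsigned long)
#endif

int main(void)
{
	unsigned long need = 0;
	int ret = 1;
	int fd = open("/dev/mpassthru", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the driver how many bytes it wants accounted (DEFAULT_NEED). */
	if (ioctl(fd, MPASSTHRU_GET_MEM_LOCKED_NEED, &need) != 0) {
		perror("MPASSTHRU_GET_MEM_LOCKED_NEED");
		goto out;
	}

	/*
	 * Charge that many bytes to the caller's locked_vm; the kernel
	 * rejects this with ENOMEM if RLIMIT_MEMLOCK would be exceeded
	 * and the caller lacks CAP_IPC_LOCK.
	 */
	if (ioctl(fd, MPASSTHRU_SET_MEM_LOCKED, &need) != 0) {
		perror("MPASSTHRU_SET_MEM_LOCKED");
		goto out;
	}

	printf("locked-memory budget set to %lu bytes\n", need);
	ret = 0;
out:
	close(fd);
	return ret;
}

The effect visible in the diff is that the driver no longer raises RLIMIT_MEMLOCK behind the caller's back via set_memlock_rlimit(); userspace now states its locked-memory budget explicitly, and the kernel enforces it against locked_vm at ioctl time and in alloc_page_info().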