@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include "ttm_page_alloc.h"
#define TTM_MEMORY_ALLOC_RETRIES 4
@@ -394,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
+ ttm_page_alloc_init();
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -413,9 +415,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
+ ttm_page_alloc_fini();
}
EXPORT_SYMBOL(ttm_mem_global_release);
new file mode 100644
@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* Simple list-based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Uses page->lru to keep a free list
+ * - Doesn't track pages that are currently in use
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+
+#include <asm/agp.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm_page_alloc.h"
+
+/* Number of 4k pages to add at once */
+#define NUM_PAGES_TO_ADD 64
+/* times are in msecs */
+#define TIME_TO_KEEP_PAGE_IN_POOL 15000
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * The pool list is a FILO stack.
+ * Pages pushed into it go to the head of the list, so the tail holds the
+ * least recently used pages. Pages at the tail that have not been used
+ * recently are freed.
+ *
+ * All expensive operations must be done outside of the pool lock to prevent
+ * contention on the lock.
+ */
+struct page_pool {
+ struct list_head list;
+ gfp_t gfp_flags;
+ unsigned npages;
+ spinlock_t lock;
+};
+
+#define MAX_NUM_POOLS 4
+
+struct pool_manager {
+ bool page_alloc_inited;
+ unsigned long time_to_wait;
+
+ struct timer_list timer;
+
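+ /* The named pools below alias the entries of pools[] so they can also be iterated generically. */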
+ union {
+ struct page_pool pools[MAX_NUM_POOLS];
+ struct {
+ struct page_pool wc_pool;
+ struct page_pool uc_pool;
+ struct page_pool wc_pool_dma32;
+ struct page_pool uc_pool_dma32;
+ };
+ };
+};
+
+
+static struct pool_manager _manager;
+
+#ifdef CONFIG_X86
+/* TODO: add a real set_pages_array_wc() to x86 like the _uc variant; this open-coded version is inefficient */
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ set_memory_wc((unsigned long)page_address(pages[i]), 1);
+ return 0;
+}
+#else
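+/* Non-x86 fallbacks: rely on the AGP map/unmap helpers, which handle the
+ * caching attribute changes where the architecture supports them. */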
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif
+
+
+static void ttm_page_pool_init_locked(struct page_pool *pool)
+{
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->list);
+}
+
+static struct page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
+ if (cstate == tt_cached)
+ return NULL;
+
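+ /* Pool index: bit 0 selects uncached vs write-combined, bit 1 the dma32 pools. */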
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager.pools[pool_index];
+}
+
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+ unsigned i;
+ set_pages_array_wb(pages, npages);
+ for (i = 0; i < npages; ++i)
+ __free_page(pages[i]);
+}
+
+/**
+ * Free pages from the pool.
+ * If limit_with_time is true, only pages that have been in the pool for at
+ * least pool_manager.time_to_wait are freed.
+ *
+ * Pages are freed in NUM_PAGES_TO_ADD chunks to prevent the freeing from
+ * blocking other operations on the pool.
+ *
+ * This is called from the timer handler.
+ **/
+static void ttm_page_pool_free(struct page_pool *pool,
+ const bool limit_with_time)
+{
+ struct page *page;
+ struct page *pages_to_free[NUM_PAGES_TO_ADD];
+ unsigned freed_pages;
+ unsigned long expire_time = jiffies - _manager.time_to_wait;
+ unsigned long irq_flags;
+
+restart:
+ freed_pages = 0;
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ {
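+ /* Walk from the tail of the FILO list, where the least recently used pages sit. */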
+ list_for_each_entry_reverse(page, &pool->list, lru) {
+
+ if (limit_with_time
+ && time_after(page->private, expire_time))
+ break;
+
+ pages_to_free[freed_pages++] = page;
+ if (freed_pages >= NUM_PAGES_TO_ADD) {
+ /* Limit how long we hold the lock so that we do
+ * not block more important threads */
+ __list_del(page->lru.prev, &pool->list);
+ pool->npages -= freed_pages;
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ goto restart;
+ }
+ }
+
+ pool->npages -= freed_pages;
+ if (freed_pages)
+ __list_del(&page->lru, &pool->list);
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+ }
+}
+
+/* Periodically free unused pages from the pools */
+static void ttm_page_pool_timer_handler(unsigned long man_addr)
+{
+ int i;
+ struct pool_manager *m = (struct pool_manager *)man_addr;
+
+ for (i = 0; i < MAX_NUM_POOLS; ++i)
+ ttm_page_pool_free(&m->pools[i], true);
+
+ mod_timer(&m->timer, jiffies + msecs_to_jiffies(PAGE_FREE_INTERVAL));
+}
+
+static void ttm_page_pool_timer_create(struct pool_manager *m)
+{
+ /* Create a timer that is fired every second to clean
+ * the free list */
+ init_timer(&m->timer);
+ m->timer.expires = jiffies + msecs_to_jiffies(PAGE_FREE_INTERVAL);
+ m->timer.function = ttm_page_pool_timer_handler;
+ m->timer.data = (unsigned long)m;
+}
+
+static void ttm_page_pool_timer_free(struct pool_manager *m)
+{
+ del_timer_sync(&m->timer);
+}
+
+static void ttm_set_pages_caching(struct page **pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ set_pages_array_uc(pages, cpages);
+ break;
+ case tt_wc:
+ set_pages_array_wc(pages, cpages);
+ break;
+ default:
+ break;
+ }
+}
+/**
+ * Fill the pool with newly allocated pages so that it holds at least count
+ * pages. Called with the pool lock held; the lock is dropped around the
+ * allocations and reacquired before returning.
+ */
+static int ttm_pages_pool_fill_locked(struct page_pool *pool, int flags,
+ enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
+{
+ unsigned pages_to_alloc = count - pool->npages;
+ struct page **pages;
+ struct page *page;
+ struct list_head h;
+ unsigned i, cpages;
+ unsigned max_cpages = min(pages_to_alloc,
+ (unsigned)(PAGE_SIZE/sizeof(*page)));
+
+ /* reserve the already allocated pages from the pool */
+ INIT_LIST_HEAD(&h);
+ list_splice_init(&pool->list, &h);
+
+ /* It is now safe to let other users modify the pool, so unlock it.
+ * Allocating pages is an expensive operation. */
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ pages = kmalloc(pages_to_alloc*sizeof(*pages), GFP_KERNEL);
+
+ if (!pages) {
+ printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ return -ENOMEM;
+ }
+
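+ /* Allocate the missing pages and switch their caching attributes in batches of max_cpages. */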
+ for (i = 0, cpages = 0; i < pages_to_alloc; ++i) {
+ page = alloc_page(pool->gfp_flags);
+ if (!page) {
+ printk(KERN_ERR "[ttm] unable to get page %d\n", i);
+
+ ttm_put_pages(&h, flags, cstate);
+ kfree(pages);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of a highmem page should never include dma32, so we
+ * should be fine in that case
+ */
+ if (!PageHighMem(page))
+#endif
+ {
+ pages[cpages++] = page;
+ if (cpages == max_cpages) {
+
+ ttm_set_pages_caching(pages, cstate, cpages);
+ cpages = 0;
+ }
+ }
+ list_add(&page->lru, &h);
+ }
+
+ if (cpages)
+ ttm_set_pages_caching(pages, cstate, cpages);
+
+ kfree(pages);
+
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+
+ list_splice(&h, &pool->list);
+
+ pool->npages += pages_to_alloc;
+
+ return 0;
+}
+
+/*
+ * On success the pages list will hold count pages with the correct
+ * caching state.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ struct page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *page = NULL;
+ unsigned long irq_flags;
+ gfp_t gfp_flags = 0;
+ int retries = 1;
+ int r;
+
+ /* No pool for cached pages */
+ if (cstate == tt_cached) {
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= __GFP_HIGHMEM;
+
+ /* also the fallback target if the pool cannot satisfy the request */
+alloc_pages:
+ for (r = 0; r < count; ++r) {
+ page = alloc_page(gfp_flags);
+ if (!page) {
+ printk(KERN_ERR "[ttm] unable to allocate page.\n");
+ return -ENOMEM;
+ }
+
+ list_add(&page->lru, pages);
+ }
+ return 0;
+ }
+
+ gfp_flags = pool->gfp_flags;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ do {
+ /* First check whether the pool already holds the pages we want */
+ if (pool->npages >= count) {
+
+
+ /* Find the cut location */
+ if (count <= pool->npages/2) {
+ unsigned c = 0;
+ list_for_each_entry(page, &pool->list, lru) {
+ if (++c >= count)
+ break;
+ }
+ } else {
+ /* Cheaper to walk from the tail; stop at the last page of the cut */
+ unsigned c = 0;
+ unsigned rcount = pool->npages - count + 1;
+
+ list_for_each_entry_reverse(page, &pool->list, lru) {
+ if (++c >= rcount)
+ break;
+ }
+ }
+
+ list_cut_position(pages, &pool->list, &page->lru);
+
+ pool->npages -= count;
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(page, pages, lru) {
+ /* clear_highpage() also handles pages without a lowmem mapping */
+ clear_highpage(page);
+ }
+ }
+
+ return 0;
+ }
+
+ if (retries == 0)
+ break;
+
+ --retries;
+
+ r = ttm_pages_pool_fill_locked(pool, flags, cstate,
+ count, &irq_flags);
+ if (r) {
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return r;
+ }
+
+ } while (1);
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ /* The pool could not provide enough pages without reporting an error,
+ * so fall back to allocating fresh pages. */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ goto alloc_pages;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate)
+{
+ struct page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+ unsigned page_count = 0;
+ unsigned long irq_flags;
+ unsigned long now = jiffies;
+
+ if (pool == NULL) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* We took ownership -> clear the pages list */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+
+#ifdef CONFIG_HIGHMEM
+ /* we don't have a pool for highmem pages -> free them */
+ if (PageHighMem(p)) {
+ list_del(&p->lru);
+ __free_page(p);
+ } else
+#endif
+ {
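+ /* Remember when the page entered the pool so the timer can age it out. */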
+ p->private = now;
+ ++page_count;
+ }
+
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice(pages, &pool->list);
+ pool->npages += page_count;
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ /* We took ownership -> clear the pages list */
+ INIT_LIST_HEAD(pages);
+}
+
+
+int ttm_page_alloc_init(void)
+{
+ if (_manager.page_alloc_inited)
+ return 0;
+
+ ttm_page_pool_init_locked(&_manager.wc_pool);
+ _manager.wc_pool.gfp_flags = GFP_HIGHUSER;
+ ttm_page_pool_init_locked(&_manager.uc_pool);
+ _manager.uc_pool.gfp_flags = GFP_HIGHUSER;
+ ttm_page_pool_init_locked(&_manager.wc_pool_dma32);
+ _manager.wc_pool_dma32.gfp_flags = GFP_USER | GFP_DMA32;
+ ttm_page_pool_init_locked(&_manager.uc_pool_dma32);
+ _manager.uc_pool_dma32.gfp_flags = GFP_USER | GFP_DMA32;
+
+ _manager.time_to_wait = msecs_to_jiffies(TIME_TO_KEEP_PAGE_IN_POOL);
+ ttm_page_pool_timer_create(&_manager);
+ _manager.page_alloc_inited = 1;
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ if (!_manager.page_alloc_inited)
+ return;
+
+ ttm_page_pool_timer_free(&_manager);
+
+ for (i = 0; i < MAX_NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager.pools[i], false);
+
+ _manager.page_alloc_inited = 0;
+}
new file mode 100644
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include "ttm/ttm_bo_driver.h"
+
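+/*
+ * Pages are passed around on list_heads linked through page->lru:
+ * ttm_get_pages() fills "pages" with "count" pages in the requested caching
+ * state, and ttm_put_pages() hands pages back to the matching pool.
+ */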
+void ttm_put_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate);
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count);
+int ttm_page_alloc_init(void);
+void ttm_page_alloc_fini(void);
+
+#endif
@@ -38,6 +38,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
@@ -72,21 +73,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
ttm->pages = NULL;
}
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
- gfp_t gfp_flags = GFP_USER;
-
- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= __GFP_DMA32;
- else
- gfp_flags |= __GFP_HIGHMEM;
-
- return alloc_page(gfp_flags);
-}
-
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
@@ -127,15 +113,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
+ struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
- p = ttm_tt_alloc_page(ttm->page_flags);
- if (!p)
+ INIT_LIST_HEAD(&h);
+
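+ /* Ask the pool allocator for a single page via a temporary list */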
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+ if (ret != 0)
return NULL;
+ p = list_first_entry(&h, struct page, lru);
+
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
@@ -244,10 +236,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (c_state != tt_cached) {
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
}
if (ttm->caching_state == tt_cached)
@@ -298,25 +290,32 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
+ unsigned count = 0;
+ struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
+ INIT_LIST_HEAD(&h);
+
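+ /* Collect the freed pages on a local list and return them to the pool in one call */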
if (be)
be->func->clear(be);
- (void)ttm_tt_set_caching(ttm, tt_cached);
for (i = 0; i < ttm->num_pages; ++i) {
+
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
if (page_count(cur_page) != 1)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
- "Leaking pages.\n");
+ "Leaking pages (%d/%d).\n",
+ page_count(cur_page), count);
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
- __free_page(cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
}
}
+ ttm_put_pages(&h, ttm->page_flags, ttm->caching_state);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;