From patchwork Wed May 26 14:18:50 2010
X-Patchwork-Submitter: Thomas Hellstrom
X-Patchwork-Id: 102407
From: Thomas Hellstrom
To: airlied@redhat.com
Cc: jglisse@redhat.com, dri-devel@lists.sourceforge.net, Thomas Hellstrom
Subject: [PATCH 2/2] drm/ttm: Fix ttm_page_alloc.c
Date: Wed, 26 May 2010 16:18:50 +0200
Message-Id: <1274883530-8017-2-git-send-email-thellstrom@vmware.com>
X-Mailer: git-send-email 1.6.2.5
In-Reply-To: <1274883530-8017-1-git-send-email-thellstrom@vmware.com>
References: <1274883530-8017-1-git-send-email-thellstrom@vmware.com>

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b6d1523..ef91069 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 
 struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
-					"is not allowed. Recomended size is "
-					"%lu\n",
-					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_ERR TTM_PFX
+			       "Setting allocation size to %lu "
+			       "is not allowed. Recommended size is "
+			       "%lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING "[ttm] Setting allocation size to "
-					"larger than %lu is not recomended.\n",
-					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_WARNING TTM_PFX
+			       "Setting allocation size to "
+			       "larger than %lu is not recommended.\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
 	unsigned i;
 	if (set_pages_array_wb(pages, npages))
-		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
 				npages);
 	for (i = 0; i < npages; ++i)
 		__free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		printk(KERN_ERR TTM_PFX
+		       "Failed to allocate memory for pool free operation.\n");
 		return 0;
 	}
 
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
 }
 
 /**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of page held.
  */
 static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 {
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
 	case tt_uncached:
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
-					cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to uc!\n",
+			       cpages);
 		break;
 	case tt_wc:
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
-					cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to wc!\n",
+			       cpages);
 		break;
 	default:
 		break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
 		struct page **failed_pages, unsigned cpages)
 {
 	unsigned i;
-	/* Failed pages has to be reed */
+	/* Failed pages have to be freed */
 	for (i = 0; i < cpages; ++i) {
 		list_del(&failed_pages[i]->lru);
 		__free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *),
 			GFP_KERNEL);
 	if (!caching_array) {
-		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		printk(KERN_ERR TTM_PFX
+		       "Unable to allocate table for new pages.");
 		return -ENOMEM;
 	}
 
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 		p = alloc_page(gfp_flags);
 
 		if (!p) {
-			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
 			if (cpages) {
-				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				r = ttm_set_pages_caching(caching_array,
+							  cstate, cpages);
 				if (r)
 					ttm_handle_caching_state_failure(pages,
 							ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to fill pool (%p).", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &pool->list, lru) {
 				++cpages;
@@ -677,7 +685,8 @@ int ttm_get_pages(struct list_head *pages, int flags,
 
 			p = alloc_page(gfp_flags);
 			if (!p) {
-				printk(KERN_ERR "[ttm] unable to allocate page.");
+				printk(KERN_ERR TTM_PFX
+				       "Unable to allocate page.");
 				return -ENOMEM;
 			}
 
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool. */
-			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
-					"for large request.");
+			printk(KERN_ERR TTM_PFX
+			       "Failed to allocate extra pages "
+			       "for large request.");
 			ttm_put_pages(pages, 0, flags, cstate);
 			return r;
 		}
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
 		return 0;
 
-	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
 	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
 	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
 		return;
 
-	printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
 	ttm_pool_mm_shrink_fini(&_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
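
Beyond the spelling fixes, the patch replaces every hand-typed "[ttm]" string with the TTM_PFX macro, so all pool-allocator messages share a single module-wide prefix. Below is a minimal user-space sketch of that pattern, not the kernel code: KERN_ERR is mocked out and the TTM_PFX value shown is an illustrative assumption (the real macro lives in the TTM headers).

```c
#include <stdio.h>

/*
 * Stand-ins for the kernel macros; the values here are
 * illustrative assumptions, not the kernel definitions.
 */
#define KERN_ERR ""		/* kernel log-level marker, mocked out */
#define TTM_PFX  "[TTM] "	/* single module-wide message prefix */

int main(void)
{
	int npages = 16;

	/*
	 * Adjacent string literals concatenate at compile time, so
	 * KERN_ERR TTM_PFX "..." collapses into one format string.
	 * Changing TTM_PFX in one place re-prefixes every message,
	 * which is why the patch drops the hand-typed "[ttm]".
	 */
	printf(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", npages);
	return 0;
}
```

The same compile-time concatenation is what lets the patch split long messages across several source lines, keeping within the kernel's line-length style without changing the emitted string.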