From patchwork Mon Dec  6 22:27:21 2010
X-Patchwork-Submitter: David Sin
X-Patchwork-Id: 380472
From: David Sin
To: Greg KH, Russell King, Andrew Morton, linux-kernel@vger.kernel.org,
        linux-omap@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Cc: Lajos Molnar, David Sin
Subject: [PATCH 4/9] TILER-DMM: TILER Memory Manager interface and implementation
Date: Mon, 6 Dec 2010 16:27:21 -0600
Message-Id: <1291674446-10766-5-git-send-email-davidsin@ti.com>
X-Mailer: git-send-email 1.7.0.4
In-Reply-To: <1291674446-10766-1-git-send-email-davidsin@ti.com>
References: <1291674446-10766-1-git-send-email-davidsin@ti.com>

diff --git a/drivers/misc/tiler/tmm-pat.c b/drivers/misc/tiler/tmm-pat.c
new file mode 100644
index 0000000..682f549
--- /dev/null
+++ b/drivers/misc/tiler/tmm-pat.c
@@ -0,0 +1,266 @@
+/*
+ * DMM driver support functions for TI TILER hardware block.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <asm/sizes.h>
+
+#include "tmm.h"
+
+/* Page size granularity can be 4k, 16k, or 64k */
+#define DMM_PAGE SZ_4K
+
+/* Memory limit to cache free pages.  TILER will eventually use this much */
+static u32 cache_limit = CONFIG_TILER_CACHE_LIMIT << 20;
+module_param_named(cache, cache_limit, uint, 0644);
+MODULE_PARM_DESC(cache, "Cache free pages if total memory is under this limit");
+
+/* global state - statically initialized */
+static LIST_HEAD(free_list);    /* page cache: list of free pages */
+static u32 total_mem;           /* total memory allocated (free & used) */
+static u32 refs;                /* number of tmm_pat instances */
+static DEFINE_MUTEX(mtx);       /* global mutex */
+
+/* The page struct pointer and physical address of each page. */
+struct mem {
+        struct list_head list;
+        u32 *pg;                /* page struct */
+        dma_addr_t pa;          /* physical address */
+};
+
+/* Used to keep track of mem per tmm_pat_get_pages call */
+struct fast {
+        struct list_head list;
+        struct mem **mem;       /* array of page info */
+        u32 *pa;                /* array of physical addresses */
+        u32 num;                /* number of pages */
+};
+
+/* TMM PAT private structure */
+struct dmm_mem {
+        struct list_head fast_list;
+        struct dmm *dmm;
+};
+
+/*
+ * Frees pages in a fast structure.  Moves pages to the free list while the
+ * total allocated memory is under cache_limit; otherwise, it frees the pages.
+ */
+static void free_fast(struct fast *f)
+{
+        s32 i = 0;
+
+        /* mutex is locked */
+        for (i = 0; i < f->num; i++) {
+                if (total_mem < cache_limit) {
+                        /* cache free page if under the limit */
+                        list_add(&f->mem[i]->list, &free_list);
+                } else {
+                        /* otherwise, free */
+                        total_mem -= PAGE_SIZE;
+                        dma_free_coherent(NULL, DMM_PAGE, f->mem[i]->pg,
+                                          f->mem[i]->pa);
+                }
+        }
+        kfree(f->pa);
+        kfree(f->mem);
+        /* remove only if element was added */
+        if (f->list.next)
+                list_del(&f->list);
+        kfree(f);
+}
+
+/* allocate and flush a page */
+static struct mem *alloc_mem(void)
+{
+        struct mem *m = kzalloc(sizeof(*m), GFP_KERNEL);
+        if (!m)
+                return NULL;
+
+        m->pg = dma_alloc_coherent(NULL, DMM_PAGE, &m->pa, GFP_KERNEL);
+        if (!m->pg) {
+                kfree(m);
+                return NULL;
+        }
+        wmb();
+
+        return m;
+}
+
+static void free_page_cache(void)
+{
+        struct mem *m, *m_;
+
+        /* mutex is locked */
+        list_for_each_entry_safe(m, m_, &free_list, list) {
+                dma_free_coherent(NULL, DMM_PAGE, m->pg, m->pa);
+                total_mem -= PAGE_SIZE;
+                list_del(&m->list);
+                kfree(m);
+        }
+}
+
+static void tmm_pat_deinit(struct tmm *tmm)
+{
+        struct fast *f, *f_;
+        struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+        mutex_lock(&mtx);
+
+        /* free all outstanding used memory */
+        list_for_each_entry_safe(f, f_, &pvt->fast_list, list)
+                free_fast(f);
+
+        /* if this is the last tmm_pat, free all memory */
+        if (--refs == 0)
+                free_page_cache();
+
+        mutex_unlock(&mtx);
+}
+
+static u32 *tmm_pat_get_pages(struct tmm *tmm, u32 n)
+{
+        struct mem *m;
+        struct fast *f;
+        struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+        f = kzalloc(sizeof(*f), GFP_KERNEL);
+        if (!f)
+                return NULL;
+
+        /* array of mem struct pointers */
+        f->mem = kzalloc(n * sizeof(*f->mem), GFP_KERNEL);
+
+        /* array of physical addresses */
+        f->pa = kzalloc(n * sizeof(*f->pa), GFP_KERNEL);
+
+        /* no pages have been allocated yet (needed for cleanup) */
+        f->num = 0;
+
+        if (!f->mem || !f->pa)
+                goto cleanup;
+
+        /* fill out fast struct mem array with free pages */
+        mutex_lock(&mtx);
+        while (f->num < n) {
+                /* if there is a free cached page use it */
+                if (!list_empty(&free_list)) {
+                        /* unbind first element from list */
+                        m = list_first_entry(&free_list, typeof(*m), list);
+                        list_del(&m->list);
+                } else {
+                        mutex_unlock(&mtx);
+
+                        /*
+                         * Unlock mutex during allocation and cache flushing.
+                         */
+                        m = alloc_mem();
+                        if (!m)
+                                goto cleanup;
+
+                        mutex_lock(&mtx);
+                        total_mem += PAGE_SIZE;
+                }
+
+                f->mem[f->num] = m;
+                f->pa[f->num++] = m->pa;
+        }
+
+        list_add(&f->list, &pvt->fast_list);
+        mutex_unlock(&mtx);
+        return f->pa;
+
+cleanup:
+        free_fast(f);
+        return NULL;
+}
+
+static void tmm_pat_free_pages(struct tmm *tmm, u32 *page_list)
+{
+        struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+        struct fast *f, *f_;
+
+        mutex_lock(&mtx);
+        /* find fast struct based on 1st page */
+        list_for_each_entry_safe(f, f_, &pvt->fast_list, list) {
+                if (f->pa[0] == page_list[0]) {
+                        free_fast(f);
+                        break;
+                }
+        }
+        mutex_unlock(&mtx);
+}
+
+static s32 tmm_pat_map(struct tmm *tmm, struct pat_area area, u32 page_pa)
+{
+        struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+        struct pat pat_desc = {NULL};
+
+        /* send pat descriptor to dmm driver */
+        pat_desc.ctrl.dir = 0;
+        pat_desc.ctrl.ini = 0;
+        pat_desc.ctrl.lut_id = 0;
+        pat_desc.ctrl.start = 1;
+        pat_desc.ctrl.sync = 0;
+        pat_desc.area = area;
+        pat_desc.next = NULL;
+
+        /* must be a 16-byte aligned physical address */
+        pat_desc.data = page_pa;
+        return dmm_pat_refill(pvt->dmm, &pat_desc, MANUAL);
+}
+
+struct tmm *tmm_pat_init(u32 pat_id)
+{
+        struct tmm *tmm = NULL;
+        struct dmm_mem *pvt = NULL;
+
+        struct dmm *dmm = dmm_pat_init(pat_id);
+        if (dmm)
+                tmm = kzalloc(sizeof(*tmm), GFP_KERNEL);
+        if (tmm)
+                pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
+        if (pvt) {
+                /* private data */
+                pvt->dmm = dmm;
+                INIT_LIST_HEAD(&pvt->fast_list);
+
+                /* increase tmm_pat references */
+                mutex_lock(&mtx);
+                refs++;
+                mutex_unlock(&mtx);
+
+                /* public data */
+                tmm->pvt = pvt;
+                tmm->deinit = tmm_pat_deinit;
+                tmm->get = tmm_pat_get_pages;
+                tmm->free = tmm_pat_free_pages;
+                tmm->map = tmm_pat_map;
+                tmm->clear = NULL;      /* not yet supported */
+
+                return tmm;
+        }
+
+        kfree(pvt);
+        kfree(tmm);
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(tmm_pat_init);
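
[Editorial note, not part of the patch: a rough illustration of the page-cache
accounting above. CONFIG_TILER_CACHE_LIMIT is taken in megabytes and shifted
into bytes; with a made-up Kconfig value of 40 this gives

        cache_limit = 40 << 20;         /* 41943040 bytes, i.e. 40 MiB */

free_fast() keeps released pages on free_list only while total_mem stays below
this value; beyond it, pages go straight back to dma_free_coherent(). Since the
"cache" parameter is registered with mode 0644, the byte limit can also be
adjusted at runtime through the module's parameters directory in sysfs.]
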
diff --git a/drivers/misc/tiler/tmm.h b/drivers/misc/tiler/tmm.h
new file mode 100644
index 0000000..cb90664
--- /dev/null
+++ b/drivers/misc/tiler/tmm.h
@@ -0,0 +1,103 @@
+/*
+ * TMM interface definition for TI TILER driver.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TMM_H
+#define TMM_H
+
+#include
+/*
+ * TMM interface
+ */
+struct tmm {
+        void *pvt;
+
+        /* function table */
+        u32 *(*get) (struct tmm *tmm, u32 num_pages);
+        void (*free) (struct tmm *tmm, u32 *pages);
+        s32 (*map) (struct tmm *tmm, struct pat_area area, u32 page_pa);
+        void (*clear) (struct tmm *tmm, struct pat_area area);
+        void (*deinit) (struct tmm *tmm);
+};
+
+/*
+ * Request a set of pages from the DMM free page stack.
+ * Return a pointer to a list of physical page addresses.
+ */
+static inline
+u32 *tmm_get(struct tmm *tmm, u32 num_pages)
+{
+        if (tmm && tmm->pvt)
+                return tmm->get(tmm, num_pages);
+        return NULL;
+}
+
+/*
+ * Return a set of used pages to the DMM free page stack.
+ */
+static inline
+void tmm_free(struct tmm *tmm, u32 *pages)
+{
+        if (tmm && tmm->pvt)
+                tmm->free(tmm, pages);
+}
+
+/*
+ * Program the physical address translator.
+ */
+static inline
+s32 tmm_map(struct tmm *tmm, struct pat_area area, u32 page_pa)
+{
+        if (tmm && tmm->map && tmm->pvt)
+                return tmm->map(tmm, area, page_pa);
+        return -ENODEV;
+}
+
+/*
+ * Clears the physical address translator.
+ */
+static inline
+void tmm_clear(struct tmm *tmm, struct pat_area area)
+{
+        if (tmm && tmm->clear && tmm->pvt)
+                tmm->clear(tmm, area);
+}
+
+/*
+ * Checks whether tiler memory manager supports mapping
+ */
+static inline
+bool tmm_can_map(struct tmm *tmm)
+{
+        return tmm && tmm->map;
+}
+
+/*
+ * Deinitialize tiler memory manager
+ */
+static inline
+void tmm_deinit(struct tmm *tmm)
+{
+        if (tmm && tmm->pvt)
+                tmm->deinit(tmm);
+}
+
+/*
+ * TMM implementation for PAT support.
+ *
+ * Initialize TMM for PAT with given id.
+ */
+struct tmm *tmm_pat_init(u32 pat_id);
+
+#endif
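
[Editorial note, not part of the patch: a minimal sketch of how a TILER
component might drive this interface. The function and variable names are
invented, the PAT id 0 is assumed, and error handling is trimmed; only the
tmm_* calls come from the header above.]

#include "tmm.h"

static int tiler_client_example(void)
{
        struct tmm *tmm = tmm_pat_init(0);      /* assumed PAT instance id */
        u32 *pages;

        if (!tmm)
                return -ENOMEM;

        /* physical addresses of 16 pages, from the cache or freshly allocated */
        pages = tmm_get(tmm, 16);
        if (pages) {
                /*
                 * A real client would now call tmm_map(tmm, area, page_pa)
                 * with a struct pat_area and the 16-byte aligned physical
                 * address of the page list (both defined by the DMM-PAT
                 * driver in patch 3/9), then release the pages when done.
                 */
                tmm_free(tmm, pages);
        }

        tmm_deinit(tmm);
        return 0;
}

Note that the PAT implementation of tmm_free() matches the page list by its
first physical address (f->pa[0]), so the pointer returned by tmm_get() must be
handed back unmodified.
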