@@ -390,4 +390,82 @@ extern dsp_status drv_release_resources(IN u32 dw_context,
void bridge_recover_schedule(void);
#endif
+/*
+ * ======== mem_ext_phys_pool_init ========
+ * Purpose:
+ * Uses the physical memory chunk passed in for internal consistent
+ * memory allocations; physical addresses are derived from the page
+ * frame address.
+ * Parameters:
+ * poolPhysBase: Starting address of the physical memory pool.
+ * poolSize: Size of the physical memory pool, in bytes.
+ * Returns:
+ * none.
+ * Requires:
+ * - MEM initialized.
+ * - valid physical address for the base and size > 0
+ */
+extern void mem_ext_phys_pool_init(IN u32 poolPhysBase, IN u32 poolSize);
+
+/*
+ * ======== mem_ext_phys_pool_release ========
+ * Purpose:
+ * Unmap and disable the external physical memory pool set up by
+ * mem_ext_phys_pool_init().
+ */
+extern void mem_ext_phys_pool_release(void);
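+
+/*
+ * Usage sketch (illustrative only; the base address and size below are
+ * hypothetical, not taken from any platform data):
+ *
+ *	mem_ext_phys_pool_init(0x87000000, 0x10000);
+ *	... consistent allocations via mem_alloc_phys_mem() ...
+ *	mem_ext_phys_pool_release();
+ */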
+
+/* ======== mem_alloc_phys_mem ========
+ * Purpose:
+ * Allocate physically contiguous, uncached memory
+ * Parameters:
+ * byte_size: Number of bytes to allocate.
+ * ulAlign: Required alignment, in bytes (power of two).
+ * pPhysicalAddress: Physical address of allocated memory.
+ * Returns:
+ * Pointer to a block of memory;
+ * NULL if memory couldn't be allocated, or if byte_size == 0.
+ * Requires:
+ * MEM initialized.
+ * Ensures:
+ * The returned pointer, if not NULL, points to a valid memory block of
+ * the size requested. Returned physical address refers to physical
+ * location of memory.
+ */
+extern void *mem_alloc_phys_mem(IN u32 byte_size,
+ IN u32 ulAlign, OUT u32 *pPhysicalAddress);
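+
+/*
+ * Usage sketch (illustrative; the size and variable names are arbitrary):
+ *
+ *	u32 phys;
+ *	void *va = mem_alloc_phys_mem(0x1000, 0, &phys);
+ *	if (!va)
+ *		;	/* handle allocation failure */
+ */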
+
+/*
+ * ======== mem_flush_cache ========
+ * Purpose:
+ * Synchronizes the system caches over the given memory region; the
+ * region may be invalidated, written back, or both, depending on
+ * FlushType.
+ * Parameters:
+ * pMemBuf: Pointer to the memory region to be flushed.
+ * byte_size: Size of the memory region to be flushed, in bytes.
+ * FlushType: PROC_INVALIDATE_MEM, PROC_WRITEBACK_MEM, or
+ *            PROC_WRITEBACK_INVALIDATE_MEM.
+ * Returns:
+ * Requires:
+ * MEM is initialized.
+ * Ensures:
+ * Cache is synchronized
+ */
+extern void mem_flush_cache(void *pMemBuf, u32 byte_size, s32 FlushType);
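+
+/*
+ * Usage sketch (illustrative): write back and invalidate a buffer that is
+ * about to be handed to the DSP:
+ *
+ *	mem_flush_cache(va, 0x1000, PROC_WRITEBACK_INVALIDATE_MEM);
+ */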
+
+/*
+ * ======== mem_free_phys_mem ========
+ * Purpose:
+ * Free the given block of physically contiguous memory.
+ * Parameters:
+ * pVirtualAddress: Pointer to virtual memory region allocated
+ * by mem_alloc_phys_mem().
+ * pPhysicalAddress: Physical address of the memory region, as returned
+ * by mem_alloc_phys_mem().
+ * byte_size: Size of the memory region allocated by mem_alloc_phys_mem().
+ * Returns:
+ * Requires:
+ * MEM initialized.
+ * pVirtualAddress is a valid memory address returned by
+ * mem_alloc_phys_mem()
+ * Ensures:
+ * pVirtualAddress is no longer a valid pointer to memory.
+ */
+extern void mem_free_phys_mem(void *pVirtualAddress,
+ u32 pPhysicalAddress, u32 byte_size);
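+
+/*
+ * Usage sketch (illustrative), pairing the allocation shown above:
+ *
+ *	mem_free_phys_mem(va, phys, 0x1000);
+ */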
+
#endif /* DRV_ */
@@ -68,26 +68,6 @@ extern void *mem_alloc(IN u32 byte_size, IN enum mem_poolattrs type);
} \
}
-/* ======== mem_alloc_phys_mem ========
- * Purpose:
- * Allocate physically contiguous, uncached memory
- * Parameters:
- * byte_size: Number of bytes to allocate.
- * ulAlign: Alignment Mask.
- * pPhysicalAddress: Physical address of allocated memory.
- * Returns:
- * Pointer to a block of memory;
- * NULL if memory couldn't be allocated, or if byte_size == 0.
- * Requires:
- * MEM initialized.
- * Ensures:
- * The returned pointer, if not NULL, points to a valid memory block of
- * the size requested. Returned physical address refers to physical
- * location of memory.
- */
-extern void *mem_alloc_phys_mem(IN u32 byte_size,
- IN u32 ulAlign, OUT u32 *pPhysicalAddress);
-
/*
* ======== mem_calloc ========
* Purpose:
@@ -124,42 +104,6 @@ extern void *mem_calloc(IN u32 byte_size, IN enum mem_poolattrs type);
extern void mem_exit(void);
/*
- * ======== mem_flush_cache ========
- * Purpose:
- * Performs system cache sync with discard
- * Parameters:
- * pMemBuf: Pointer to memory region to be flushed.
- * pMemBuf: Size of the memory region to be flushed.
- * Returns:
- * Requires:
- * MEM is initialized.
- * Ensures:
- * Cache is synchronized
- */
-extern void mem_flush_cache(void *pMemBuf, u32 byte_size, s32 FlushType);
-
-/*
- * ======== mem_free_phys_mem ========
- * Purpose:
- * Free the given block of physically contiguous memory.
- * Parameters:
- * pVirtualAddress: Pointer to virtual memory region allocated
- * by mem_alloc_phys_mem().
- * pPhysicalAddress: Pointer to physical memory region allocated
- * by mem_alloc_phys_mem().
- * byte_size: Size of the memory region allocated by mem_alloc_phys_mem().
- * Returns:
- * Requires:
- * MEM initialized.
- * pVirtualAddress is a valid memory address returned by
- * mem_alloc_phys_mem()
- * Ensures:
- * pVirtualAddress is no longer a valid pointer to memory.
- */
-extern void mem_free_phys_mem(void *pVirtualAddress,
- u32 pPhysicalAddress, u32 byte_size);
-
-/*
* ======== MEM_FREE_OBJECT ========
* Purpose:
* Utility macro to invalidate an object's signature, and deallocate it.
@@ -255,26 +199,4 @@ extern bool services_mem_init(void);
*/
#define MEM_UNMAP_LINEAR_ADDRESS(pBaseAddr) {}
-/*
- * ======== mem_ext_phys_pool_init ========
- * Purpose:
- * Uses the physical memory chunk passed for internal consitent memory
- * allocations.
- * physical address based on the page frame address.
- * Parameters:
- * poolPhysBase starting address of the physical memory pool.
- * poolSize size of the physical memory pool.
- * Returns:
- * none.
- * Requires:
- * - MEM initialized.
- * - valid physical address for the base and size > 0
- */
-extern void mem_ext_phys_pool_init(IN u32 poolPhysBase, IN u32 poolSize);
-
-/*
- * ======== mem_ext_phys_pool_release ========
- */
-extern void mem_ext_phys_pool_release(void);
-
#endif /* MEM_ */
@@ -64,6 +64,14 @@ struct drv_ext {
/* ----------------------------------- Globals */
static s32 refs;
+static bool ext_phys_mem_pool_enabled;
+struct ext_phys_mem_pool {
+ u32 phys_mem_base;
+ u32 phys_mem_size;
+ u32 virt_mem_base;
+ u32 next_phys_alloc_ptr;
+};
+static struct ext_phys_mem_pool ext_mem_pool;
/* ----------------------------------- Function Prototypes */
static dsp_status request_bridge_resources(u32 dw_context, s32 fRequest);
@@ -1068,3 +1076,148 @@ static dsp_status request_bridge_resources_dsp(u32 dw_context, s32 bRequest)
/* End Mem alloc */
return status;
}
+
+void mem_ext_phys_pool_init(u32 poolPhysBase, u32 poolSize)
+{
+ u32 pool_virt_base;
+
+ /* get the virtual address for the physical memory pool passed */
+ pool_virt_base = (u32) ioremap(poolPhysBase, poolSize);
+
+ if ((void **)pool_virt_base == NULL) {
+ pr_err("%s: external physical memory map failed\n", __func__);
+ ext_phys_mem_pool_enabled = false;
+ } else {
+ ext_mem_pool.phys_mem_base = poolPhysBase;
+ ext_mem_pool.phys_mem_size = poolSize;
+ ext_mem_pool.virt_mem_base = pool_virt_base;
+ ext_mem_pool.next_phys_alloc_ptr = poolPhysBase;
+ ext_phys_mem_pool_enabled = true;
+ }
+}
+
+void mem_ext_phys_pool_release(void)
+{
+ if (ext_phys_mem_pool_enabled) {
+ iounmap((void *)(ext_mem_pool.virt_mem_base));
+ ext_phys_mem_pool_enabled = false;
+ }
+}
+
+/*
+ * ======== mem_ext_phys_mem_alloc ========
+ * Purpose:
+ * Allocate physically contiguous, uncached memory from external memory pool
+ */
+
+static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, OUT u32 * pPhysAddr)
+{
+ u32 new_alloc_ptr;
+ u32 offset;
+ u32 virt_addr;
+
+ if (align == 0)
+ align = 1;
+
+ if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
+ - ext_mem_pool.next_phys_alloc_ptr)) {
+ pPhysAddr = NULL;
+ return NULL;
+ } else {
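+		/*
+		 * Round the next allocation pointer up to the requested
+		 * alignment.  With hypothetical values: a pointer of
+		 * 0x87000104 and align 0x100 give offset 0x4 and a
+		 * rounded-up pointer of 0x87000200.
+		 */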
+ offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
+ if (offset == 0)
+ new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
+ else
+ new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
+ (align - offset);
+ if ((new_alloc_ptr + bytes) <=
+ (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
+ /* we can allocate */
+ *pPhysAddr = new_alloc_ptr;
+ ext_mem_pool.next_phys_alloc_ptr =
+ new_alloc_ptr + bytes;
+ virt_addr =
+ ext_mem_pool.virt_mem_base + (new_alloc_ptr -
+ ext_mem_pool.
+ phys_mem_base);
+ return (void *)virt_addr;
+ } else {
+ *pPhysAddr = 0;
+ return NULL;
+ }
+ }
+}
+
+/*
+ * ======== mem_alloc_phys_mem ========
+ * Purpose:
+ * Allocate physically contiguous, uncached memory
+ */
+void *mem_alloc_phys_mem(u32 byte_size, u32 ulAlign, OUT u32 * pPhysicalAddress)
+{
+ void *va_mem = NULL;
+ dma_addr_t pa_mem;
+
+ if (byte_size > 0) {
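+		/*
+		 * Prefer the reserved external pool when one was mapped at
+		 * init time; otherwise fall back to a DMA-coherent kernel
+		 * allocation.
+		 */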
+ if (ext_phys_mem_pool_enabled) {
+ va_mem = mem_ext_phys_mem_alloc(byte_size, ulAlign,
+ (u32 *) &pa_mem);
+ } else
+ va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
+ GFP_KERNEL);
+ if (va_mem == NULL)
+ *pPhysicalAddress = 0;
+ else
+ *pPhysicalAddress = pa_mem;
+ }
+ return va_mem;
+}
+
+/*
+ * ======== mem_flush_cache ========
+ * Purpose:
+ * Flush cache
+ */
+void mem_flush_cache(void *pMemBuf, u32 byte_size, s32 FlushType)
+{
+ if (!pMemBuf)
+ return;
+
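+	/*
+	 * dmac_*_range() operate on the inner (L1) cache and take virtual
+	 * addresses; outer_*_range() handle the outer cache (e.g. L2) and
+	 * take physical addresses.
+	 */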
+ switch (FlushType) {
+ /* invalidate only */
+ case PROC_INVALIDATE_MEM:
+ dmac_inv_range(pMemBuf, pMemBuf + byte_size);
+ outer_inv_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
+ byte_size));
+ break;
+ /* writeback only */
+ case PROC_WRITEBACK_MEM:
+ dmac_clean_range(pMemBuf, pMemBuf + byte_size);
+ outer_clean_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
+ byte_size));
+ break;
+ /* writeback and invalidate */
+ case PROC_WRITEBACK_INVALIDATE_MEM:
+ dmac_flush_range(pMemBuf, pMemBuf + byte_size);
+ outer_flush_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
+ byte_size));
+ break;
+ }
+
+}
+
+/*
+ * ======== mem_free_phys_mem ========
+ * Purpose:
+ * Free the given block of physically contiguous memory.
+ */
+void mem_free_phys_mem(void *pVirtualAddress, u32 pPhysicalAddress,
+ u32 byte_size)
+{
+ DBC_REQUIRE(pVirtualAddress != NULL);
+
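+	/*
+	 * Buffers carved from the external pool come from a simple bump
+	 * allocator and are never returned to it; only DMA-coherent
+	 * allocations are actually released here.
+	 */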
+ if (!ext_phys_mem_pool_enabled)
+ dma_free_coherent(NULL, byte_size, pVirtualAddress,
+ pPhysicalAddress);
+}
+
@@ -35,87 +35,7 @@
#define MEM512MB 0x1fffffff
/* ----------------------------------- Globals */
-static bool ext_phys_mem_pool_enabled;
-struct ext_phys_mem_pool {
- u32 phys_mem_base;
- u32 phys_mem_size;
- u32 virt_mem_base;
- u32 next_phys_alloc_ptr;
-};
-
-static struct ext_phys_mem_pool ext_mem_pool;
-
-void mem_ext_phys_pool_init(u32 poolPhysBase, u32 poolSize)
-{
- u32 pool_virt_base;
-
- /* get the virtual address for the physical memory pool passed */
- pool_virt_base = (u32) ioremap(poolPhysBase, poolSize);
-
- if ((void **)pool_virt_base == NULL) {
- pr_err("%s: external physical memory map failed\n", __func__);
- ext_phys_mem_pool_enabled = false;
- } else {
- ext_mem_pool.phys_mem_base = poolPhysBase;
- ext_mem_pool.phys_mem_size = poolSize;
- ext_mem_pool.virt_mem_base = pool_virt_base;
- ext_mem_pool.next_phys_alloc_ptr = poolPhysBase;
- ext_phys_mem_pool_enabled = true;
- }
-}
-
-void mem_ext_phys_pool_release(void)
-{
- if (ext_phys_mem_pool_enabled) {
- iounmap((void *)(ext_mem_pool.virt_mem_base));
- ext_phys_mem_pool_enabled = false;
- }
-}
-
-/*
- * ======== mem_ext_phys_mem_alloc ========
- * Purpose:
- * Allocate physically contiguous, uncached memory from external memory pool
- */
-
-static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, OUT u32 * pPhysAddr)
-{
- u32 new_alloc_ptr;
- u32 offset;
- u32 virt_addr;
-
- if (align == 0)
- align = 1;
-
- if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
- - ext_mem_pool.next_phys_alloc_ptr)) {
- pPhysAddr = NULL;
- return NULL;
- } else {
- offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
- if (offset == 0)
- new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
- else
- new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
- (align - offset);
- if ((new_alloc_ptr + bytes) <=
- (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
- /* we can allocate */
- *pPhysAddr = new_alloc_ptr;
- ext_mem_pool.next_phys_alloc_ptr =
- new_alloc_ptr + bytes;
- virt_addr =
- ext_mem_pool.virt_mem_base + (new_alloc_ptr -
- ext_mem_pool.
- phys_mem_base);
- return (void *)virt_addr;
- } else {
- *pPhysAddr = 0;
- return NULL;
- }
- }
-}
/*
* ======== mem_alloc ========
@@ -146,31 +66,6 @@ void *mem_alloc(u32 byte_size, enum mem_poolattrs type)
return mem;
}
-/*
- * ======== mem_alloc_phys_mem ========
- * Purpose:
- * Allocate physically contiguous, uncached memory
- */
-void *mem_alloc_phys_mem(u32 byte_size, u32 ulAlign, OUT u32 * pPhysicalAddress)
-{
- void *va_mem = NULL;
- dma_addr_t pa_mem;
-
- if (byte_size > 0) {
- if (ext_phys_mem_pool_enabled) {
- va_mem = mem_ext_phys_mem_alloc(byte_size, ulAlign,
- (u32 *) &pa_mem);
- } else
- va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
- (in_atomic()) ? GFP_ATOMIC :
- GFP_KERNEL);
- if (va_mem == NULL)
- *pPhysicalAddress = 0;
- else
- *pPhysicalAddress = pa_mem;
- }
- return va_mem;
-}
/*
* ======== mem_calloc ========
@@ -211,53 +106,7 @@ void mem_exit(void)
{
}
-/*
- * ======== mem_flush_cache ========
- * Purpose:
- * Flush cache
- */
-void mem_flush_cache(void *pMemBuf, u32 byte_size, s32 FlushType)
-{
- if (!pMemBuf)
- return;
-
- switch (FlushType) {
- /* invalidate only */
- case PROC_INVALIDATE_MEM:
- dmac_inv_range(pMemBuf, pMemBuf + byte_size);
- outer_inv_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
- byte_size));
- break;
- /* writeback only */
- case PROC_WRITEBACK_MEM:
- dmac_clean_range(pMemBuf, pMemBuf + byte_size);
- outer_clean_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
- byte_size));
- break;
- /* writeback and invalidate */
- case PROC_WRITEBACK_INVALIDATE_MEM:
- dmac_flush_range(pMemBuf, pMemBuf + byte_size);
- outer_flush_range(__pa((u32) pMemBuf), __pa((u32) pMemBuf +
- byte_size));
- break;
- }
-
-}
-
-/*
- * ======== mem_free_phys_mem ========
- * Purpose:
- * Free the given block of physically contiguous memory.
- */
-void mem_free_phys_mem(void *pVirtualAddress, u32 pPhysicalAddress,
- u32 byte_size)
-{
- DBC_REQUIRE(pVirtualAddress != NULL);
- if (!ext_phys_mem_pool_enabled)
- dma_free_coherent(NULL, byte_size, pVirtualAddress,
- pPhysicalAddress);
-}
/*
* ======== services_mem_init ========