@@ -49,7 +49,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
struct device *dev = &cdev->pdev->dev;
struct addr_tbl_entry *entry;
- u32 pbl_size, i;
+ u32 i;

if (!chain->pbl.pp_addr_tbl)
return;
@@ -63,11 +63,10 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
entry->dma_map);
}

- pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
if (!chain->b_external_pbl)
- dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
- chain->pbl_sp.p_phys_table);
+ dma_free_coherent(dev, chain->pbl_sp.table_size,
+ chain->pbl_sp.table_virt,
+ chain->pbl_sp.table_phys);

vfree(chain->pbl.pp_addr_tbl);
chain->pbl.pp_addr_tbl = NULL;
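The two hunks above change the teardown side: dma_free_coherent() must be called with exactly the size that was passed to dma_alloc_coherent(), and instead of recomputing that size from page_cnt at free time (the dropped pbl_size local), the chain now carries it in pbl_sp.table_size from the moment of allocation. A minimal sketch of the pattern, using the real DMA API but a hypothetical struct my_table standing in for the pbl_sp sub-struct:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/overflow.h>
    #include <linux/types.h>

    /* Hypothetical, reduced copy of pbl_sp: the size handed to
     * dma_alloc_coherent() lives next to the addresses it describes.
     */
    struct my_table {
        __le64 *virt;
        dma_addr_t phys;
        size_t size;
    };

    static int my_table_alloc(struct device *dev, struct my_table *t,
                              u32 entries)
    {
        t->size = array_size(entries, sizeof(*t->virt));
        if (unlikely(t->size == SIZE_MAX))
            return -EOVERFLOW;

        t->virt = dma_alloc_coherent(dev, t->size, &t->phys, GFP_KERNEL);
        return t->virt ? 0 : -ENOMEM;
    }

    static void my_table_free(struct device *dev, struct my_table *t)
    {
        if (!t->virt)
            return;

        /* The recorded size goes straight back to the DMA API; nothing
         * is recomputed, so alloc and free can never disagree.
         */
        dma_free_coherent(dev, t->size, t->virt, t->phys);
        t->virt = NULL;
        t->size = 0;
    }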
@@ -190,7 +189,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
struct device *dev = &cdev->pdev->dev;
struct addr_tbl_entry *addr_tbl;
dma_addr_t phys, pbl_phys;
- void *pbl_virt;
+ __le64 *pbl_virt;
u32 page_cnt, i;
size_t size;
void *virt;
@@ -214,7 +213,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
pbl_phys = ext_pbl->p_pbl_phys;
chain->b_external_pbl = true;
} else {
- size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
+ size = array_size(page_cnt, sizeof(*pbl_virt));
if (unlikely(size == SIZE_MAX))
return -EOVERFLOW;
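Two details in the sizing above: the element size now comes from the pointee type, sizeof(*pbl_virt), instead of the free-standing QED_CHAIN_PBL_ENTRY_SIZE constant, and the multiplication stays wrapped in array_size() from <linux/overflow.h>, which saturates to SIZE_MAX on overflow rather than wrapping around to a small value, so the caller can fail cleanly with -EOVERFLOW instead of allocating a short buffer. A runnable userspace approximation of that saturating multiply, built on the same __builtin_mul_overflow intrinsic (GCC/Clang) that the kernel helper uses on compilers that provide it:

    #include <stdint.h>
    #include <stdio.h>

    /* Rough userspace equivalent of the kernel's array_size():
     * count * element size, saturating to SIZE_MAX on overflow.
     */
    static size_t my_array_size(size_t n, size_t elem_size)
    {
        size_t bytes;

        if (__builtin_mul_overflow(n, elem_size, &bytes))
            return SIZE_MAX;

        return bytes;
    }

    int main(void)
    {
        /* A sane PBL: 0x1000 pages, one 8-byte entry each. */
        printf("%zu\n", my_array_size(0x1000, sizeof(uint64_t)));

        /* A hostile count saturates instead of wrapping. */
        printf("%zu\n", my_array_size(SIZE_MAX / 4, sizeof(uint64_t)));

        return 0;
    }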
@@ -225,8 +224,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
if (!pbl_virt)
return -ENOMEM;

- chain->pbl_sp.p_virt_table = pbl_virt;
- chain->pbl_sp.p_phys_table = pbl_phys;
+ chain->pbl_sp.table_virt = pbl_virt;
+ chain->pbl_sp.table_phys = pbl_phys;
+ chain->pbl_sp.table_size = size;

for (i = 0; i < page_cnt; i++) {
virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
@@ -240,8 +240,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
}

/* Fill the PBL table with the physical address of the page */
- *(dma_addr_t *)pbl_virt = phys;
- pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ pbl_virt[i] = cpu_to_le64(phys);

/* Keep the virtual address of the page */
addr_tbl[i].virt_addr = virt;
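The rewrite of the fill loop above is the substantive fix in this function. The old code stored a raw dma_addr_t through a cast and advanced a void * cursor by a byte count; dma_addr_t is a CPU-endian integer that is only 32 bits wide on platforms without CONFIG_ARCH_DMA_ADDR_T_64BIT, while the device-visible PBL entry is a fixed 64-bit little-endian word. Typing the table as __le64 * and writing cpu_to_le64(phys) at index i makes both the width and the byte order explicit, and the bitwise __le64 type lets sparse flag any future unconverted store. A small userspace demonstration of what the conversion does, with htole64() standing in for cpu_to_le64():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint64_t phys = 0x0000000123456000ULL; /* example bus address */
        uint64_t entry;
        uint8_t table[8];
        size_t i;

        /* What the fixed code does: an explicit 64-bit little-endian
         * store that produces identical bytes on every host.
         */
        entry = htole64(phys);
        memcpy(table, &entry, sizeof(entry));

        /* Prints "00 60 45 23 01 00 00 00": lowest byte first. A raw
         * "*(dma_addr_t *)p = phys" store would give these bytes in
         * reversed order on a big-endian host, and would explicitly
         * write only the first half of the entry on a configuration
         * where dma_addr_t is 32 bits.
         */
        for (i = 0; i < sizeof(table); i++)
            printf("%02x ", table[i]);
        printf("\n");

        return 0;
    }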
@@ -366,11 +366,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
- p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
+ qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;

/* Place consolidation queue address in ramrod */
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
- p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
+ qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));

qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
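In the PF-start ramrod above, the two callers stop reaching through chain.pbl_sp and use the existing qed_chain_get_pbl_phys() getter, so the field rename (and any future layout change) stays contained in qed_chain.h. The DMA_REGPAIR_LE() destination is a firmware regpair, i.e. two 32-bit little-endian words; a userspace sketch of the split such a macro has to perform (the struct layout and helper name here are illustrative assumptions, not the authoritative qed definitions):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shape of a firmware "regpair": low word first, both
     * halves little-endian regardless of host byte order.
     */
    struct regpair {
        uint32_t lo;
        uint32_t hi;
    };

    /* Illustrative stand-in for what a DMA_REGPAIR_LE()-style helper
     * is expected to do with a 64-bit bus address.
     */
    static void regpair_set_le(struct regpair *rp, uint64_t addr)
    {
        rp->lo = htole32((uint32_t)addr);
        rp->hi = htole32((uint32_t)(addr >> 32));
    }

    int main(void)
    {
        struct regpair rp;

        regpair_set_le(&rp, 0x0000000123456000ULL);
        printf("lo=%08x hi=%08x\n", rp.lo, rp.hi);

        return 0;
    }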
@@ -127,8 +127,9 @@ struct qed_chain {

/* Base address of a pre-allocated buffer for pbl */
struct {
- dma_addr_t p_phys_table;
- void *p_virt_table;
+ __le64 *table_virt;
+ dma_addr_t table_phys;
+ size_t table_size;
} pbl_sp;

/* Address of first page of the chain - the address is required
@@ -146,7 +147,6 @@ struct qed_chain {
bool b_external_pbl;
};

-#define QED_CHAIN_PBL_ENTRY_SIZE 8
#define QED_CHAIN_PAGE_SIZE 0x1000

#define ELEMS_PER_PAGE(elem_size) \
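With pbl_sp.table_virt typed as __le64 *, the entry size is a property of the type, so the bare QED_CHAIN_PBL_ENTRY_SIZE above (an 8 that agreed with the code only by convention) can be deleted; every user now derives the size via sizeof(*table_virt). A toy example of why sizing from the pointee is the safer idiom:

    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t pbl_entry_t; /* stand-in for __le64 */

    int main(void)
    {
        pbl_entry_t *tbl;
        size_t entries = 512;

        /* sizeof(*tbl) follows the element type automatically; a
         * parallel "#define PBL_ENTRY_SIZE 8" would silently go stale
         * if the entry type ever changed.
         */
        tbl = calloc(entries, sizeof(*tbl));
        if (!tbl)
            return 1;

        free(tbl);

        return 0;
    }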
@@ -236,7 +236,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
- return p_chain->pbl_sp.p_phys_table;
+ return p_chain->pbl_sp.table_phys;
}

/**
@@ -527,8 +527,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->page_cnt = page_cnt;
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;

- p_chain->pbl_sp.p_phys_table = 0;
- p_chain->pbl_sp.p_virt_table = NULL;
+ p_chain->pbl_sp.table_phys = 0;
+ p_chain->pbl_sp.table_virt = NULL;
p_chain->pbl.pp_addr_tbl = NULL;
}
@@ -569,8 +569,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
void *p_virt_pbl,
dma_addr_t p_phys_pbl,
struct addr_tbl_entry *pp_addr_tbl)
{
- p_chain->pbl_sp.p_phys_table = p_phys_pbl;
- p_chain->pbl_sp.p_virt_table = p_virt_pbl;
+ p_chain->pbl_sp.table_phys = p_phys_pbl;
+ p_chain->pbl_sp.table_virt = p_virt_pbl;
p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}
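qed_chain_init_pbl_mem() above is the path for externally supplied PBLs, and it deliberately leaves table_size untouched: the free path at the top of this patch only calls dma_free_coherent() under !chain->b_external_pbl, so a size is needed only for tables the chain allocated itself. (qed_chain_init_params() likewise resets just the pointer fields; the free path reads table_size only after qed_chain_alloc_pbl() has populated pp_addr_tbl, so a stale size is never consumed.) A compact userspace sketch of that ownership split, with all names hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    /* Owned tables record their size for the eventual free; borrowed
     * (external) tables record only the address and are never freed
     * here - mirroring pbl_sp.table_size and b_external_pbl.
     */
    struct table {
        void *virt;
        size_t size; /* meaningful only when !external */
        bool external;
    };

    static int table_alloc_owned(struct table *t, size_t size)
    {
        t->virt = malloc(size);
        t->size = size;
        t->external = false;

        return t->virt ? 0 : -1;
    }

    static void table_init_external(struct table *t, void *virt)
    {
        t->virt = virt; /* caller keeps ownership and the size */
        t->external = true;
    }

    static void table_free(struct table *t)
    {
        if (!t->external) /* the !b_external_pbl guard */
            free(t->virt);

        t->virt = NULL;
    }

    int main(void)
    {
        struct table owned, ext;
        char buf[64];

        if (table_alloc_owned(&owned, 256))
            return 1;

        table_init_external(&ext, buf);
        table_free(&owned); /* frees */
        table_free(&ext);   /* leaves buf alone */

        return 0;
    }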