@@ -564,7 +564,6 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
 	static const char *names[vmw_dma_map_max] = {
-		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Caching DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
@@ -598,8 +597,7 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
 	int ret = 0;
 
 	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
-	if (dev_priv->map_mode != vmw_dma_phys &&
-	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
 		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
 		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 	}
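
For reference, the mask selection that survives this hunk boils down to: try a 64-bit DMA mask first, then drop back to 44 bits on 32-bit kernels or when vmw_restrict_dma_mask is set. The user-space sketch below models only that control flow; fake_set_dma_mask, pick_dma_mask and the BITS() macro are made-up stand-ins for dma_set_mask_and_coherent() and DMA_BIT_MASK(), not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up stand-in for DMA_BIT_MASK(); avoids the undefined 1ULL << 64. */
#define BITS(n) (((n) >= 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Made-up stand-in for dma_set_mask_and_coherent(); always succeeds here. */
static int fake_set_dma_mask(uint64_t *dev_mask, uint64_t mask)
{
	*dev_mask = mask;
	return 0;
}

/* Same shape as the patched vmw_dma_masks(): 64-bit first, 44-bit fallback. */
static int pick_dma_mask(uint64_t *dev_mask, bool restrict_dma_mask)
{
	int ret = fake_set_dma_mask(dev_mask, BITS(64));

	if (sizeof(unsigned long) == 4 || restrict_dma_mask) {
		printf("Restricting DMA addresses to 44 bits.\n");
		return fake_set_dma_mask(dev_mask, BITS(44));
	}
	return ret;
}

int main(void)
{
	uint64_t mask;

	pick_dma_mask(&mask, true);
	printf("mask = 0x%llx\n", (unsigned long long)mask);
	return 0;
}

The 44-bit figure is taken straight from the hunk above; the sketch only mirrors the fallback order, not the device-side reason for the limit.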
@@ -281,7 +281,6 @@ struct vmw_res_cache_entry {
  * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
  */
 enum vmw_dma_map_mode {
-	vmw_dma_phys, /* Use physical page addresses */
 	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
 	vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
 	vmw_dma_map_bind, /* Unmap from DMA just before unbind */
@@ -295,11 +295,6 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
  * pointed to by @viter. Functions are selected depending on the
  * current mapping mode.
  */
-static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
-{
-	return page_to_phys(viter->pages[viter->i]);
-}
-
 static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
 {
 	return viter->addrs[viter->i];
@@ -329,10 +324,6 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 	viter->page = &__vmw_piter_non_sg_page;
 	viter->pages = vsgt->pages;
 	switch (vsgt->mode) {
-	case vmw_dma_phys:
-		viter->next = &__vmw_piter_non_sg_next;
-		viter->dma_address = &__vmw_piter_phys_addr;
-		break;
 	case vmw_dma_alloc_coherent:
 		viter->next = &__vmw_piter_non_sg_next;
 		viter->dma_address = &__vmw_piter_dma_addr;
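
Taken together, the last two hunks leave the page iterator with one dispatch target fewer: the physical-address callback is gone, and the coherent case keeps the plain array walk with a per-page DMA address lookup. The sketch below is an illustrative user-space model of that function-pointer dispatch; piter, piter_start, non_sg_next and the mode values are invented names that mirror the shape of vmw_piter_start() after the patch, not the driver's API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative mapping modes; the physical mode no longer has a counterpart. */
enum map_mode { MODE_ALLOC_COHERENT, MODE_MAP_POPULATE };

struct piter {
	const uint64_t *addrs;	/* per-page DMA addresses */
	size_t npages;
	size_t i;
	/* Callbacks selected once at start time, as in vmw_piter_start(). */
	bool (*next)(struct piter *);
	uint64_t (*dma_address)(struct piter *);
};

/* Plain array walk, analogous to __vmw_piter_non_sg_next(). */
static bool non_sg_next(struct piter *p)
{
	return ++p->i < p->npages;
}

/* Per-page address lookup, analogous to __vmw_piter_dma_addr(). */
static uint64_t dma_addr(struct piter *p)
{
	return p->addrs[p->i];
}

static void piter_start(struct piter *p, const uint64_t *addrs, size_t npages,
			enum map_mode mode)
{
	p->addrs = addrs;
	p->npages = npages;
	p->i = 0;

	switch (mode) {
	case MODE_ALLOC_COHERENT:
		p->next = non_sg_next;
		p->dma_address = dma_addr;
		break;
	default:
		/* The sg-table based modes would install different callbacks. */
		p->next = non_sg_next;
		p->dma_address = dma_addr;
		break;
	}
}

int main(void)
{
	const uint64_t addrs[] = { 0x1000, 0x2000, 0x3000 };
	struct piter p;

	piter_start(&p, addrs, 3, MODE_ALLOC_COHERENT);
	do {
		printf("dma = 0x%llx\n", (unsigned long long)p.dma_address(&p));
	} while (p.next(&p));
	return 0;
}

Selecting the callbacks once in the start function, rather than switching on the mode for every page, is the pattern the driver keeps; the patch only shrinks the set of callbacks to choose from.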