From patchwork Sat Jan 29 17:40:22 2011
X-Patchwork-Submitter: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
X-Patchwork-Id: 516501
From: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
To: joro@8bytes.org
Cc: blauwirbel@gmail.com, paul@codesourcery.com, avi@redhat.com,
    anthony@codemonkey.ws, av1474@comtv.ru, yamahata@valinux.co.jp,
    kvm@vger.kernel.org, qemu-devel@nongnu.org,
    Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Subject: [PATCH 01/13] Generic DMA memory access interface
Date: Sat, 29 Jan 2011 19:40:22 +0200
Message-Id: <7ec6f2018811566a4b207c4f5b8d7b8b7342b786.1296321798.git.eduard.munteanu@linux360.ro>
X-Mailer: git-send-email 1.7.3.4
List-ID: X-Mailing-List: kvm@vger.kernel.org

diff --git a/Makefile.target b/Makefile.target
index e15b1c4..e5817ab 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -218,7 +218,7 @@ obj-i386-y += cirrus_vga.o apic.o ioapic.o piix_pci.o
 obj-i386-y += vmmouse.o vmport.o hpet.o applesmc.o
 obj-i386-y += device-hotplug.o pci-hotplug.o smbios.o wdt_ib700.o
 obj-i386-y += debugcon.o multiboot.o
-obj-i386-y += pc_piix.o
+obj-i386-y += pc_piix.o dma_rw.o
 obj-i386-$(CONFIG_SPICE) += qxl.o qxl-logger.o qxl-render.o
 
 # shared objects
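By way of illustration (a sketch only, not part of this patch; MyNicState and
my_nic_fetch_descriptor are hypothetical names), a device model would funnel
its guest memory accesses through an embedded DMADevice instead of calling
cpu_physical_memory_*() directly:

#include "hw/dma_rw.h"

/* Hypothetical device state embedding the generic DMA handle. */
typedef struct MyNicState {
    DMADevice dma;   /* dma.mmu stays NULL when no IOMMU is present */
} MyNicState;

static void my_nic_fetch_descriptor(MyNicState *s, dma_addr_t desc_addr)
{
    uint8_t desc[16];

    /* Translated through the IOMMU when dma.mmu is set; otherwise this
     * degenerates to a plain cpu_physical_memory_rw(). */
    dma_memory_read(&s->dma, desc_addr, desc, sizeof(desc));
}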
diff --git a/hw/dma_rw.c b/hw/dma_rw.c
new file mode 100644
index 0000000..ef8e7f8
--- /dev/null
+++ b/hw/dma_rw.c
@@ -0,0 +1,124 @@
+/*
+ * Generic DMA memory access interface.
+ *
+ * Copyright (c) 2011 Eduard - Gabriel Munteanu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "dma_rw.h"
+#include "range.h"
+
+static void dma_register_memory_map(DMADevice *dev,
+                                    dma_addr_t addr,
+                                    dma_addr_t len,
+                                    target_phys_addr_t paddr,
+                                    void *buf,
+                                    DMAInvalidateMapFunc *invalidate,
+                                    void *invalidate_opaque)
+{
+    DMAMemoryMap *map;
+
+    map = qemu_malloc(sizeof(DMAMemoryMap));
+    map->addr = addr;
+    map->len = len;
+    map->paddr = paddr;
+    map->buf = buf;
+    map->invalidate = invalidate;
+    map->invalidate_opaque = invalidate_opaque;
+
+    QLIST_INSERT_HEAD(&dev->mmu->memory_maps, map, list);
+}
+
+static void dma_unregister_memory_map(DMADevice *dev,
+                                      void *buf,
+                                      dma_addr_t len)
+{
+    DMAMemoryMap *map;
+
+    /* Match on the host buffer we handed out from dma_memory_map();
+     * the caller only has that pointer, not the guest physical address. */
+    QLIST_FOREACH(map, &dev->mmu->memory_maps, list) {
+        if (map->buf == buf && map->len == len) {
+            QLIST_REMOVE(map, list);
+            qemu_free(map);
+            break;
+        }
+    }
+}
+
+void dma_invalidate_memory_range(DMADevice *dev,
+                                 dma_addr_t addr,
+                                 dma_addr_t len)
+{
+    DMAMemoryMap *map, *next_map;
+
+    /* Use the _SAFE variant: entries are removed while iterating. */
+    QLIST_FOREACH_SAFE(map, &dev->mmu->memory_maps, list, next_map) {
+        if (ranges_overlap(addr, len, map->addr, map->len)) {
+            map->invalidate(map->invalidate_opaque);
+            QLIST_REMOVE(map, list);
+            qemu_free(map);
+        }
+    }
+}
+
+void *dma_memory_map(DMADevice *dev,
+                     DMAInvalidateMapFunc *cb,
+                     void *opaque,
+                     dma_addr_t addr,
+                     dma_addr_t *len,
+                     int is_write)
+{
+    int err;
+    dma_addr_t paddr, plen;
+    void *buf;
+
+    if (!dev || !dev->mmu) {
+        return cpu_physical_memory_map(addr, len, is_write);
+    }
+
+    plen = *len;
+    err = dev->mmu->translate(dev, addr, &paddr, &plen, is_write);
+    if (err) {
+        return NULL;
+    }
+
+    /*
+     * If this is true, the virtual region is contiguous,
+     * but the translated physical region isn't. We just
+     * clamp *len, much like cpu_physical_memory_map() does.
+     */
+    if (plen < *len) {
+        *len = plen;
+    }
+
+    buf = cpu_physical_memory_map(paddr, len, is_write);
+
+    /* We treat maps as remote TLBs to cope with stuff like AIO. */
+    if (cb && buf) {
+        dma_register_memory_map(dev, addr, *len, paddr, buf, cb, opaque);
+    }
+
+    return buf;
+}
+
+void dma_memory_unmap(DMADevice *dev,
+                      void *buffer,
+                      dma_addr_t len,
+                      int is_write,
+                      dma_addr_t access_len)
+{
+    cpu_physical_memory_unmap(buffer, len, is_write, access_len);
+    if (dev && dev->mmu) {
+        dma_unregister_memory_map(dev, buffer, len);
+    }
+}
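The map/unmap pair mirrors cpu_physical_memory_map(), with the invalidate
callback acting as a remote-TLB shootdown hook. A sketch of an AIO-style
caller (hypothetical names, not part of this patch):

#include "hw/dma_rw.h"

static void my_dma_invalidate(void *opaque)
{
    /* The IOMMU tore the mapping down: cancel or restart whatever
     * request still references the host buffer. */
}

static void my_start_transfer(DMADevice *dev, dma_addr_t addr,
                              dma_addr_t size)
{
    dma_addr_t len = size;
    void *buf;

    buf = dma_memory_map(dev, my_dma_invalidate, NULL, addr, &len, 1);
    if (!buf) {
        return;   /* translation failed or the region is not mappable */
    }

    /* ... hand buf/len to AIO; when the request completes: */
    dma_memory_unmap(dev, buf, len, 1, len);
}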
diff --git a/hw/dma_rw.h b/hw/dma_rw.h
new file mode 100644
index 0000000..bc93511
--- /dev/null
+++ b/hw/dma_rw.h
@@ -0,0 +1,157 @@
+#ifndef DMA_RW_H
+#define DMA_RW_H
+
+#include "qemu-common.h"
+
+typedef uint64_t dma_addr_t;
+
+typedef struct DMAMmu DMAMmu;
+typedef struct DMADevice DMADevice;
+typedef struct DMAMemoryMap DMAMemoryMap;
+
+typedef int DMATranslateFunc(DMADevice *dev,
+                             dma_addr_t addr,
+                             dma_addr_t *paddr,
+                             dma_addr_t *len,
+                             int is_write);
+
+typedef void DMAInvalidateMapFunc(void *opaque);
+
+struct DMAMmu {
+    DeviceState *iommu;
+    DMATranslateFunc *translate;
+    QLIST_HEAD(memory_maps, DMAMemoryMap) memory_maps;
+};
+
+struct DMADevice {
+    DMAMmu *mmu;
+};
+
+struct DMAMemoryMap {
+    dma_addr_t addr;
+    dma_addr_t len;
+    target_phys_addr_t paddr;
+    void *buf;
+    DMAInvalidateMapFunc *invalidate;
+    void *invalidate_opaque;
+
+    QLIST_ENTRY(DMAMemoryMap) list;
+};
+
+static inline void dma_memory_rw(DMADevice *dev,
+                                 dma_addr_t addr,
+                                 void *buf,
+                                 dma_addr_t len,
+                                 int is_write)
+{
+    dma_addr_t paddr, plen;
+    int err;
+
+    /*
+     * Fast-path non-iommu.
+     * More importantly, makes it obvious what this function does.
+     */
+    if (!dev || !dev->mmu) {
+        cpu_physical_memory_rw(addr, buf, len, is_write);
+        return;
+    }
+
+    while (len) {
+        err = dev->mmu->translate(dev, addr, &paddr, &plen, is_write);
+        if (err) {
+            return;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        cpu_physical_memory_rw(paddr, buf, plen, is_write);
+
+        len -= plen;
+        addr += plen;
+        buf += plen;
+    }
+}
+
+static inline void dma_memory_read(DMADevice *dev,
+                                   dma_addr_t addr,
+                                   void *buf,
+                                   dma_addr_t len)
+{
+    dma_memory_rw(dev, addr, buf, len, 0);
+}
+
+static inline void dma_memory_write(DMADevice *dev,
+                                    dma_addr_t addr,
+                                    const void *buf,
+                                    dma_addr_t len)
+{
+    dma_memory_rw(dev, addr, (void *) buf, len, 1);
+}
+void *dma_memory_map(DMADevice *dev,
+                     DMAInvalidateMapFunc *cb,
+                     void *opaque,
+                     dma_addr_t addr,
+                     dma_addr_t *len,
+                     int is_write);
+void dma_memory_unmap(DMADevice *dev,
+                      void *buffer,
+                      dma_addr_t len,
+                      int is_write,
+                      dma_addr_t access_len);
+
+void dma_invalidate_memory_range(DMADevice *dev,
+                                 dma_addr_t addr,
+                                 dma_addr_t len);
+
+#define DEFINE_DMA_LD(suffix, size)                                   \
+static inline uint##size##_t                                          \
+dma_ld##suffix(DMADevice *dev, dma_addr_t addr)                       \
+{                                                                     \
+    int err;                                                          \
+    dma_addr_t paddr, plen;                                           \
+                                                                      \
+    if (!dev || !dev->mmu) {                                          \
+        return ld##suffix##_phys(addr);                               \
+    }                                                                 \
+                                                                      \
+    err = dev->mmu->translate(dev, addr, &paddr, &plen, 0);           \
+    if (err || (plen < size / 8)) {                                   \
+        return 0;                                                     \
+    }                                                                 \
+                                                                      \
+    return ld##suffix##_phys(paddr);                                  \
+}
+
+#define DEFINE_DMA_ST(suffix, size)                                   \
+static inline void                                                    \
+dma_st##suffix(DMADevice *dev, dma_addr_t addr, uint##size##_t val)   \
+{                                                                     \
+    int err;                                                          \
+    dma_addr_t paddr, plen;                                           \
+                                                                      \
+    if (!dev || !dev->mmu) {                                          \
+        st##suffix##_phys(addr, val);                                 \
+        return;                                                       \
+    }                                                                 \
+                                                                      \
+    err = dev->mmu->translate(dev, addr, &paddr, &plen, 1);           \
+    if (err || (plen < size / 8)) {                                   \
+        return;                                                       \
+    }                                                                 \
+                                                                      \
+    st##suffix##_phys(paddr, val);                                    \
+}
+
+DEFINE_DMA_LD(ub, 8)
+DEFINE_DMA_LD(uw, 16)
+DEFINE_DMA_LD(l, 32)
+DEFINE_DMA_LD(q, 64)
+
+DEFINE_DMA_ST(b, 8)
+DEFINE_DMA_ST(w, 16)
+DEFINE_DMA_ST(l, 32)
+DEFINE_DMA_ST(q, 64)
+
+#endif
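An IOMMU emulation plugs in by providing a DMATranslateFunc and pointing each
device's DMADevice at its DMAMmu. A minimal sketch of the expected contract
(identity mapping, hypothetical; a real IOMMU would walk its page tables and
honor the is_write permission):

#include "hw/dma_rw.h"

static int my_iommu_translate(DMADevice *dev, dma_addr_t addr,
                              dma_addr_t *paddr, dma_addr_t *len,
                              int is_write)
{
    /* Report the translated physical address and how far the translation
     * remains valid; callers clamp *len themselves. Return non-zero to
     * signal a translation fault. */
    *paddr = addr;
    *len = 0x1000 - (addr & 0xfff);   /* valid to the end of this page */
    return 0;
}

With mmu.translate set to such a function and dev->mmu pointing at the DMAMmu,
dma_memory_rw(), dma_memory_map() and the dma_ld*/dma_st* accessors above all
pick up the translation automatically.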