@@ -46,3 +46,4 @@ virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s g
virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d"
virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d"
virtio_balloon_to_target(uint64_t target, uint32_t num_pages) "balloon target: 0x%"PRIx64" num_pages: %d"
+virtio_balloon_hinting_request(unsigned long pfn, unsigned int num_pages) "Guest page hinting request: pfn: %lu num_pages: %u"
@@ -33,6 +33,13 @@
#define BALLOON_PAGE_SIZE (1 << VIRTIO_BALLOON_PFN_SHIFT)
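+/*
+ * A single hint entry passed by the guest: the first PFN of a reported
+ * free block and its order, i.e. the block spans (1 << order) pages.
+ */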
+struct guest_pages {
+    unsigned long pfn;
+    unsigned int order;
+};
+
+void page_hinting_request(uint64_t addr, uint32_t len);
+
static void balloon_page(void *addr, int deflate)
{
    if (!qemu_balloon_is_inhibited()) {
@@ -207,6 +214,77 @@ static void balloon_stats_set_poll_interval(Object *obj, Visitor *v,
    balloon_stats_change_timer(s, 0);
}
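+/*
+ * Translate a guest physical address to a host virtual address through
+ * the system memory map.  Sets @errp and returns NULL if nothing is
+ * mapped at @addr or if the region there is not RAM/ROMD.
+ */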
+static void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, Error **errp)
+{
+    MemoryRegionSection mrs = memory_region_find(get_system_memory(),
+                                                 addr, 1);
+
+    if (!mrs.mr) {
+        error_setg(errp, "No memory is mapped at address 0x%" HWADDR_PRIx,
+                   addr);
+        return NULL;
+    }
+
+    if (!memory_region_is_ram(mrs.mr) && !memory_region_is_romd(mrs.mr)) {
+        error_setg(errp, "Memory at address 0x%" HWADDR_PRIx " is not RAM",
+                   addr);
+        memory_region_unref(mrs.mr);
+        return NULL;
+    }
+
+    *p_mr = mrs.mr;
+    return qemu_map_ram_ptr(mrs.mr->ram_block, mrs.offset_within_region);
+}
+
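+/*
+ * Process one hinting request: @addr is the guest physical address of an
+ * array of struct guest_pages and @len is taken as the number of entries
+ * in that array.  Each page of every reported block is handed back to
+ * the host with a DONTNEED madvise.
+ */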
+void page_hinting_request(uint64_t addr, uint32_t len)
+{
+    Error *local_err = NULL;
+    MemoryRegion *mr = NULL;
+    void *hvaddr;
+    int ret = 0;
+    struct guest_pages *guest_obj;
+    int i = 0;
+    void *hvaddr_to_free;
+    unsigned long pfn, pfn_end;
+    uint64_t gpaddr_to_free;
+
+    hvaddr = gpa2hva(&mr, addr, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+        return;
+    }
+    guest_obj = hvaddr;
+
+    while (i < len) {
+        pfn = guest_obj[i].pfn;
+        pfn_end = guest_obj[i].pfn + (1 << guest_obj[i].order) - 1;
+        trace_virtio_balloon_hinting_request(pfn, (1 << guest_obj[i].order));
+        while (pfn <= pfn_end) {
+            gpaddr_to_free = pfn << VIRTIO_BALLOON_PFN_SHIFT;
+            hvaddr_to_free = gpa2hva(&mr, gpaddr_to_free, &local_err);
+            if (local_err) {
+                error_report_err(local_err);
+                return;
+            }
+            ret = qemu_madvise(hvaddr_to_free, BALLOON_PAGE_SIZE,
+                               QEMU_MADV_DONTNEED);
+            if (ret == -1) {
+                error_report("%s: madvise DONTNEED failed", __func__);
+            }
+            pfn++;
+        }
+        i++;
+    }
+}
+
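+/*
+ * Virtqueue handler for the hinting queue (hvq): read the hint buffer's
+ * address and length from the queue, release the reported pages, then
+ * push an empty element and notify the guest.
+ */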
+static void virtio_balloon_page_hinting(VirtIODevice *vdev, VirtQueue *vq)
+{
+    uint64_t addr;
+    uint32_t len;
+    VirtQueueElement elem = {};
+
+    pop_hinting_addr(vq, &addr, &len);
+    page_hinting_request(addr, len);
+    virtqueue_push(vq, &elem, 0);
+    virtio_notify(vdev, vq);
+}
+
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
@@ -376,6 +454,7 @@ static uint64_t virtio_balloon_get_features(VirtIODevice *vdev, uint64_t f,
    VirtIOBalloon *dev = VIRTIO_BALLOON(vdev);
    f |= dev->host_features;
    virtio_add_feature(&f, VIRTIO_BALLOON_F_STATS_VQ);
+    virtio_add_feature(&f, VIRTIO_BALLOON_F_HINTING);
    return f;
}
@@ -445,6 +524,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);
+    s->hvq = virtio_add_queue(vdev, 128, virtio_balloon_page_hinting);
    reset_stats(s);
}
@@ -488,6 +568,8 @@ static void virtio_balloon_instance_init(Object *obj)
object_property_add(obj, "guest-stats", "guest statistics",
balloon_stats_get_all, NULL, NULL, s, NULL);
+ object_property_add(obj, "guest-page-hinting", "guest page hinting",
+ NULL, NULL, NULL, s, NULL);
object_property_add(obj, "guest-stats-polling-interval", "int",
balloon_stats_get_poll_interval,
@@ -847,6 +847,31 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
    return elem;
}
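+/*
+ * Read the guest physical address and length from the descriptor at the
+ * head of @vq, advancing last_avail_idx, without building a full
+ * VirtQueueElement.
+ */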
+void pop_hinting_addr(VirtQueue *vq, uint64_t *addr, uint32_t *len)
+{
+    VRingMemoryRegionCaches *caches;
+    VRingDesc desc;
+    MemoryRegionCache *desc_cache;
+    VirtIODevice *vdev = vq->vdev;
+    unsigned int head, max;
+
+    max = vq->vring.num;
+    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
+        virtio_error(vdev, "Unable to read head");
+        return;
+    }
+
+    caches = vring_get_region_caches(vq);
+    if (caches->desc.len < max * sizeof(VRingDesc)) {
+        virtio_error(vdev, "Cannot map descriptor ring");
+        return;
+    }
+    desc_cache = &caches->desc;
+    vring_desc_read(vdev, &desc, desc_cache, head);
+    *addr = desc.addr;
+    *len = desc.len;
+}
+
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
@@ -23,6 +23,7 @@
#define LEGACY_VIRTIO_IS_BIENDIAN 1
#endif
+void pop_hinting_addr(VirtQueue *vq, uint64_t *addr, uint32_t *len);
static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
{
#if defined(LEGACY_VIRTIO_IS_BIENDIAN)
@@ -32,7 +32,7 @@ typedef struct virtio_balloon_stat_modern {
typedef struct VirtIOBalloon {
    VirtIODevice parent_obj;
-    VirtQueue *ivq, *dvq, *svq;
+    VirtQueue *ivq, *dvq, *svq, *hvq;
    uint32_t num_pages;
    uint32_t actual;
    uint64_t stats[VIRTIO_BALLOON_S_NR];
@@ -34,6 +34,7 @@
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
+#define VIRTIO_BALLOON_F_HINTING 5 /* Page hinting virtqueue */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12