@@ -1157,32 +1157,6 @@ void qemu_mutex_unlock_ramlist(void)
}
#ifdef __linux__
-
-#include <sys/vfs.h>
-
-#define HUGETLBFS_MAGIC 0x958458f6
-
-static long gethugepagesize(const char *path, Error **errp)
-{
- struct statfs fs;
- int ret;
-
- do {
- ret = statfs(path, &fs);
- } while (ret != 0 && errno == EINTR);
-
- if (ret != 0) {
- error_setg_errno(errp, errno, "failed to get page size of file %s",
- path);
- return 0;
- }
-
- if (fs.f_type != HUGETLBFS_MAGIC)
- fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
-
- return fs.f_bsize;
-}
-
static void *file_ram_alloc(RAMBlock *block,
ram_addr_t memory,
const char *path,
@@ -1193,20 +1167,24 @@ static void *file_ram_alloc(RAMBlock *block,
char *c;
void *area;
int fd;
- uint64_t hpagesize;
- Error *local_err = NULL;
+ uint64_t pagesize;
- hpagesize = gethugepagesize(path, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ pagesize = qemu_file_get_page_size(path);
+ if (!pagesize) {
+ error_setg(errp, "can't get page size for %s", path);
goto error;
}
- block->mr->align = hpagesize;
- if (memory < hpagesize) {
+ if (pagesize == getpagesize()) {
+ fprintf(stderr, "Memory is not allocated from HugeTlbfs.\n");
+ }
+
+ block->mr->align = pagesize;
+
+ if (memory < pagesize) {
error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
- "or larger than huge page size 0x%" PRIx64,
- memory, hpagesize);
+ "or larger than page size 0x%" PRIx64,
+ memory, pagesize);
goto error;
}
@@ -1230,14 +1208,14 @@ static void *file_ram_alloc(RAMBlock *block,
fd = mkstemp(filename);
if (fd < 0) {
error_setg_errno(errp, errno,
- "unable to create backing store for hugepages");
+ "unable to create backing store for path %s", path);
g_free(filename);
goto error;
}
unlink(filename);
g_free(filename);
- memory = ROUND_UP(memory, hpagesize);
+ memory = ROUND_UP(memory, pagesize);
/*
* ftruncate is not supported by hugetlbfs in older
@@ -1249,10 +1227,10 @@ static void *file_ram_alloc(RAMBlock *block,
perror("ftruncate");
}
- area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
+ area = qemu_ram_mmap(fd, memory, pagesize, block->flags & RAM_SHARED);
if (area == MAP_FAILED) {
error_setg_errno(errp, errno,
- "unable to map backing store for hugepages");
+ "unable to map backing store for path %s", path);
close(fd);
goto error;
}
Currently, file_ram_alloc() is designed for hugetlbfs; however, the memory of
an nvdimm can come either from a raw pmem device, e.g. /dev/pmem, or from a
file located on a DAX-enabled filesystem. So this patch lets it work on any
kind of path.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 exec.c | 56 +++++++++++++++++---------------------------------------
 1 file changed, 17 insertions(+), 39 deletions(-)
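For reference, below is a minimal sketch of what the qemu_file_get_page_size()
helper used above could look like. The real helper is introduced elsewhere in
this series (in util/mmap-alloc.c, not in this hunk); the statfs()/HUGETLBFS_MAGIC
logic mirrors the removed gethugepagesize(), and the fallback to getpagesize()
for non-hugetlbfs paths is an assumption, not the authoritative implementation:

/* Sketch only: the actual helper is added earlier in this series in
 * util/mmap-alloc.c. The fallback behaviour here is an assumption
 * based on the removed gethugepagesize() above. */
#include <sys/vfs.h>
#include <unistd.h>
#include <errno.h>
#include <stddef.h>

#define HUGETLBFS_MAGIC 0x958458f6

size_t qemu_file_get_page_size(const char *path)
{
    struct statfs fs;
    int ret;

    if (!path) {
        return 0;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    /* On hugetlbfs the filesystem block size is the huge page size; for
     * any other backing (raw pmem device, DAX-enabled filesystem, tmpfs,
     * ...) fall back to the normal host page size. */
    if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
        return fs.f_bsize;
    }

    return getpagesize();
}

With a helper like this, the "pagesize == getpagesize()" check added in
file_ram_alloc() simply warns when the path is not hugetlbfs-backed instead
of refusing to allocate, which is what allows nvdimm backends to use
arbitrary paths.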