Commit 6eebf958 authored by Paolo Bonzini, committed by Anthony Liguori

osdep, kvm: rename low-level RAM allocation functions

This is preparatory to the introduction of a separate freeing API.
Reported-by: Amos Kong <akong@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Amos Kong <akong@redhat.com>
Message-id: 1368454796-14989-2-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Parent d34dc45d
@@ -78,16 +78,15 @@ avoided.
 
 Use of the malloc/free/realloc/calloc/valloc/memalign/posix_memalign
 APIs is not allowed in the QEMU codebase. Instead of these routines,
 use the GLib memory allocation routines g_malloc/g_malloc0/g_new/
-g_new0/g_realloc/g_free or QEMU's qemu_vmalloc/qemu_memalign/qemu_vfree
+g_new0/g_realloc/g_free or QEMU's qemu_memalign/qemu_blockalign/qemu_vfree
 APIs.
 
 Please note that g_malloc will exit on allocation failure, so there
 is no need to test for failure (as you would have to with malloc).
 Calling g_malloc with a zero size is valid and will return NULL.
 
-Memory allocated by qemu_vmalloc or qemu_memalign must be freed with
-qemu_vfree, since breaking this will cause problems on Win32 and user
-emulators.
+Memory allocated by qemu_memalign or qemu_blockalign must be freed with
+qemu_vfree, since breaking this will cause problems on Win32.
 
 4. String manipulation
......
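The CODING_STYLE hunk above pairs each allocator family with its own free function. The following is a minimal sketch, not part of this commit, of how that pairing looks in a caller; the include paths and the helper name allocation_pairing_example are assumptions for illustration only, while g_malloc0/g_free and qemu_memalign/qemu_vfree are the APIs named in the rule.

    #include <glib.h>
    #include "qemu/osdep.h"   /* declares qemu_memalign()/qemu_vfree() (see hunk below) */

    /* Illustration only: GLib allocations are released with g_free(),
     * qemu_memalign() allocations with qemu_vfree() (required on Win32). */
    static void allocation_pairing_example(void)
    {
        char *buf = g_malloc0(4096);                 /* exits on failure, no NULL check needed */
        void *aligned = qemu_memalign(4096, 65536);  /* aligned host buffer */

        /* ... use the buffers ... */

        g_free(buf);          /* g_malloc/g_malloc0/g_new/g_new0 -> g_free */
        qemu_vfree(aligned);  /* qemu_memalign/qemu_blockalign -> qemu_vfree */
    }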
@@ -1062,7 +1062,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
 #if defined (__linux__) && !defined(TARGET_S390X)
             new_block->host = file_ram_alloc(new_block, size, mem_path);
             if (!new_block->host) {
-                new_block->host = qemu_vmalloc(size);
+                new_block->host = qemu_anon_ram_alloc(size);
                 memory_try_enable_merging(new_block->host, size);
             }
 #else
@@ -1074,9 +1074,9 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                 xen_ram_alloc(new_block->offset, size, mr);
             } else if (kvm_enabled()) {
                 /* some s390/kvm configurations have special constraints */
-                new_block->host = kvm_vmalloc(size);
+                new_block->host = kvm_ram_alloc(size);
             } else {
-                new_block->host = qemu_vmalloc(size);
+                new_block->host = qemu_anon_ram_alloc(size);
             }
             memory_try_enable_merging(new_block->host, size);
         }
......
@@ -96,7 +96,7 @@ typedef signed int int_fast16_t;
 
 int qemu_daemon(int nochdir, int noclose);
 void *qemu_memalign(size_t alignment, size_t size);
-void *qemu_vmalloc(size_t size);
+void *qemu_anon_ram_alloc(size_t size);
 void qemu_vfree(void *ptr);
 
 #define QEMU_MADV_INVALID -1
......
@@ -142,8 +142,8 @@ int kvm_init_vcpu(CPUState *cpu);
 int kvm_cpu_exec(CPUArchState *env);
 
 #if !defined(CONFIG_USER_ONLY)
-void *kvm_vmalloc(ram_addr_t size);
-void *kvm_arch_vmalloc(ram_addr_t size);
+void *kvm_ram_alloc(ram_addr_t size);
+void *kvm_arch_ram_alloc(ram_addr_t size);
 #endif
 
 void kvm_setup_guest_memory(void *start, size_t size);
......
@@ -1790,17 +1790,17 @@ int kvm_has_intx_set_mask(void)
     return kvm_state->intx_set_mask;
 }
 
-void *kvm_vmalloc(ram_addr_t size)
+void *kvm_ram_alloc(ram_addr_t size)
 {
 #ifdef TARGET_S390X
     void *mem;
 
-    mem = kvm_arch_vmalloc(size);
+    mem = kvm_arch_ram_alloc(size);
     if (mem) {
         return mem;
     }
 #endif
-    return qemu_vmalloc(size);
+    return qemu_anon_ram_alloc(size);
 }
 
 void kvm_setup_guest_memory(void *start, size_t size)
......
@@ -332,7 +332,7 @@ static void *legacy_s390_alloc(ram_addr_t size)
     return mem;
 }
 
-void *kvm_arch_vmalloc(ram_addr_t size)
+void *kvm_arch_ram_alloc(ram_addr_t size)
 {
     /* Can we use the standard allocation ? */
     if (kvm_check_extension(kvm_state, KVM_CAP_S390_GMAP) &&
......
@@ -32,7 +32,7 @@ g_free(void *ptr) "ptr %p"
 
 # osdep.c
 qemu_memalign(size_t alignment, size_t size, void *ptr) "alignment %zu size %zu ptr %p"
-qemu_vmalloc(size_t size, void *ptr) "size %zu ptr %p"
+qemu_anon_ram_alloc(size_t size, void *ptr) "size %zu ptr %p"
 qemu_vfree(void *ptr) "ptr %p"
 
 # hw/virtio.c
......
@@ -101,7 +101,7 @@ void *qemu_memalign(size_t alignment, size_t size)
 }
 
 /* alloc shared memory pages */
-void *qemu_vmalloc(size_t size)
+void *qemu_anon_ram_alloc(size_t size)
 {
     size_t align = QEMU_VMALLOC_ALIGN;
     size_t total = size + align - getpagesize();
@@ -125,7 +125,7 @@ void *qemu_vmalloc(size_t size)
         munmap(ptr + size, total - size);
     }
 
-    trace_qemu_vmalloc(size, ptr);
+    trace_qemu_anon_ram_alloc(size, ptr);
     return ptr;
 }
......
@@ -53,7 +53,7 @@ void *qemu_memalign(size_t alignment, size_t size)
     return ptr;
 }
 
-void *qemu_vmalloc(size_t size)
+void *qemu_anon_ram_alloc(size_t size)
 {
     void *ptr;
 
@@ -64,7 +64,7 @@ void *qemu_vmalloc(size_t size)
         abort();
     }
     ptr = qemu_oom_check(VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE));
-    trace_qemu_vmalloc(size, ptr);
+    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
 }
......
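Taken together, the rename leaves two distinct low-level entry points: qemu_memalign() for ordinary aligned host buffers, and qemu_anon_ram_alloc() (or kvm_ram_alloc() under KVM) for guest RAM blocks. Below is a hypothetical caller sketch that mirrors the exec.c hunk above; alloc_guest_ram is not a function in this commit, only an illustration of how the renamed allocators are selected.

    /* Hypothetical helper, for illustration only.  Guest RAM goes through the
     * renamed allocators; under KVM, kvm_ram_alloc() gives kvm_arch_ram_alloc()
     * a chance to apply s390-specific constraints before falling back. */
    static void *alloc_guest_ram(ram_addr_t size)
    {
        if (kvm_enabled()) {
            return kvm_ram_alloc(size);
        }
        return qemu_anon_ram_alloc(size);   /* anonymous mmap / VirtualAlloc path */
    }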