Commit 9cc9a6a2 authored by sosnin-vladimir

Add malloc_info, malloc_stats_print, mallinfo2, malloc_iterate, malloc_enable,...

Add malloc_info, malloc_stats_print, mallinfo2, malloc_iterate, malloc_enable, malloc_disable, mallopt, and malloc_backtrace for backward compatibility with jemalloc. Implemented by Olefirenko Egor, Polyakov Maxim, and Frolikov Boris.
Signed-off-by: sosnin-vladimir <sosninvladimir@huawei.com>
Parent 43f04932
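
For orientation, a minimal, hypothetical caller of the new statistics entry points introduced here (prototypes appear in the include/malloc.h hunk below). The write-callback shape follows the malloc_stats_print declaration; passing stderr as the opaque cookie and leaving opts NULL mirrors what this implementation itself does, since opts is not consulted.

#include <malloc.h>
#include <stdio.h>

/* write_cb for malloc_stats_print: the first argument is the opaque cookie
 * passed as cbopaque, the second is a chunk of formatted report text. */
static void write_to_stream(void *cookie, const char *msg)
{
    fputs(msg, (FILE *)cookie);
}

int main(void)
{
    void *p = malloc(128);        /* a live allocation for the report to show */

    /* Per-thread table pushed through the callback; opts is ignored here. */
    malloc_stats_print(write_to_stream, stderr, NULL);

    /* XML report written to a stdio stream; options must be 0, otherwise
     * the call fails with EINVAL. */
    malloc_info(0, stdout);

    free(p);
    return 0;
}
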
......@@ -56,6 +56,7 @@ declare_args() {
if (!is_standard_system) {
enable_musl_log = false
}
musl_iterate_and_stats_api = true
musl_secure_level = 1
}
......
......@@ -515,7 +515,7 @@ musl_src_file = [
"src/malloc/malloc.c",
"src/malloc/malloc_random.c",
"src/malloc/malloc_usable_size.c",
"src/malloc/mallocng/mallinfo.c",
"src/malloc/stats.c",
"src/malloc/memalign.c",
"src/malloc/posix_memalign.c",
"src/math/__cos.c",
......@@ -2017,6 +2017,7 @@ musl_src_porting_file = [
"include/info/application_target_sdk_version.h",
"include/info/device_api_version.h",
"include/info/fatal_message.h",
"include/malloc.h",
"include/pthread.h",
"include/fcntl.h",
"include/poll.h",
......@@ -2065,6 +2066,8 @@ musl_src_porting_file = [
"src/linux/reboot.c",
"src/linux/tgkill.c",
"src/malloc/malloc.c",
"src/malloc/memalign.c",
"src/malloc/stats.c",
"src/malloc/malloc_random.c",
"src/multibyte/wcsnrtombs.c",
"src/network/inet_legacy.c",
......
......@@ -281,6 +281,10 @@ template("musl_libs") {
defines += [ "MALLOC_SECURE_ALL" ]
}
if (musl_iterate_and_stats_api) {
defines += [ "MUSL_ITERATE_AND_STATS_API" ]
}
foreach(s, sources_orig) {
sources += [ "${target_out_dir}/${musl_ported_dir}/${s}" ]
}
......@@ -387,6 +391,11 @@ template("musl_libs") {
"src/env/__stack_chk_fail.c",
]
defines = []
if (musl_iterate_and_stats_api) {
defines += [ "MUSL_ITERATE_AND_STATS_API" ]
}
if (musl_arch == "arm") {
sources_orig += [ "src/thread/${musl_arch}/__set_thread_area.c" ]
} else if (musl_arch == "aarch64") {
......
#ifndef _MALLOC_H
#define _MALLOC_H
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
#define __NEED_size_t
#define __NEED_ssize_t
#define __NEED_uintptr_t
#include <bits/alltypes.h>
void *malloc (size_t);
void *calloc (size_t, size_t);
void *realloc (void *, size_t);
void free (void *);
void *valloc (size_t);
void *memalign(size_t, size_t);
size_t malloc_usable_size(void *);
struct mallinfo {
int arena;
int ordblks;
int smblks;
int hblks;
int hblkhd;
int usmblks;
int fsmblks;
int uordblks;
int fordblks;
int keepcost;
};
struct mallinfo mallinfo(void);
struct mallinfo2 {
size_t arena;
size_t ordblks;
size_t smblks;
size_t hblks;
size_t hblkhd;
size_t usmblks;
size_t fsmblks;
size_t uordblks;
size_t fordblks;
size_t keepcost;
};
struct mallinfo2 mallinfo2(void);
int malloc_iterate(void* base, size_t size, void (*callback)(void* base, size_t size, void* arg), void* arg);
void malloc_disable(void);
void malloc_enable(void);
int malloc_info(int options, FILE* fp);
void malloc_stats_print(void (*write_cb) (void *, const char *), void *cbopaque, const char *opts);
int mallopt(int param, int value);
ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
#ifdef __cplusplus
}
#endif
#endif
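
As an aside, a short illustrative read of the mallinfo2 counters declared above. In the stats.c implementation later in this commit only hblks, hblkhd, uordblks and fordblks are populated; the remaining fields stay zero, so this sketch prints just those four.

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *p = malloc(4096);             /* give the counters something to count */
    struct mallinfo2 mi = mallinfo2();  /* aggregated over all threads plus abandoned chunks */

    printf("mmapped regions : %zu\n", mi.hblks);
    printf("mmapped bytes   : %zu\n", mi.hblkhd);
    printf("allocated bytes : %zu\n", mi.uordblks);
    printf("free heap bytes : %zu\n", mi.fordblks);

    free(p);
    return 0;
}
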
......@@ -6,8 +6,12 @@
#include "syscall.h"
#include "atomic.h"
#include "libc.h"
#include "pthread.h"
#include "sys/mman.h"
#include "malloc_impl.h"
#include "pthread_impl.h"
static void dummy(void) {}
weak_alias(dummy, _init);
......@@ -95,6 +99,14 @@ static int libc_start_main_stage2(int (*main)(int,char **,char **), int argc, ch
#endif
errno = 0;
#ifdef MUSL_ITERATE_AND_STATS_API
__init_occupied_bin_key_once();
occupied_bin_t *occupied_bin = internal_calloc(sizeof(occupied_bin_t), 1);
if (occupied_bin == NULL) return ENOMEM;
pthread_setspecific(__get_occupied_bin_key(), occupied_bin);
#endif
libc.initialized = 1;
/* Pass control to the application */
exit(main(argc, argv, envp));
return 0;
......
#ifndef _MUSL_MALLOC_H
#define _MUSL_MALLOC_H
#include "malloc.h"
#ifdef __cplusplus
extern "C" {
#endif
......@@ -21,6 +23,19 @@ void *__libc_valloc(size_t);
void *__libc_memalign(size_t, size_t);
size_t __libc_malloc_usable_size(void *);
struct mallinfo2 __libc_mallinfo2(void);
int __libc_malloc_iterate(void* base, size_t size, void (*callback)(void* base, size_t size, void* arg), void* arg);
void __libc_malloc_disable(void);
void __libc_malloc_enable(void);
int __libc_malloc_info(int options, FILE* fp);
void __libc_malloc_stats_print(void (*write_cb) (void *, const char *), void *cbopaque, const char *opts);
int __libc_mallopt(int param, int value);
ssize_t __libc_malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
#ifdef __cplusplus
}
#endif
......
......@@ -18,6 +18,17 @@ typedef void* (*MallocVallocType)(size_t);
typedef void (*MallocFreeType)(void*);
typedef void* (*MallocMemalignType)(size_t, size_t);
typedef size_t (*MallocMallocUsableSizeType)(void*);
typedef struct mallinfo (*MallinfoType)(void);
typedef struct mallinfo2 (*Mallinfo2Type)(void);
typedef int (*MallocIterateType)(void*, size_t, void (*callback)(void*, size_t, void*), void*);
typedef void (*MallocDisableType)(void);
typedef void (*MallocEnableType)(void);
typedef int (*MallocInfoType)(int, FILE*);
typedef void (*MallocStatsPrintType)(void (*) (void *, const char *), void *, const char *);
typedef int (*MalloptType)(int, int);
typedef ssize_t (*MallocBacktraceType)(void*, uintptr_t*, size_t);
typedef bool (*GetHookFlagType)();
typedef bool (*SetHookFlagType)(bool);
......@@ -31,6 +42,15 @@ struct MallocDispatchType {
MallocFreeType free;
MallocMemalignType memalign;
MallocMallocUsableSizeType malloc_usable_size;
MallinfoType mallinfo;
Mallinfo2Type mallinfo2;
MallocIterateType malloc_iterate;
MallocDisableType malloc_disable;
MallocEnableType malloc_enable;
MallocInfoType malloc_info;
MallocStatsPrintType malloc_stats_print;
MalloptType mallopt;
MallocBacktraceType malloc_backtrace;
GetHookFlagType get_hook_flag;
SetHookFlagType set_hook_flag;
};
......
......@@ -36,6 +36,7 @@ struct __libc {
int can_do_threads;
int threaded;
int secure;
int initialized;
volatile int threads_minus_1;
size_t *auxv;
struct tls_module *tls_head;
......
......@@ -2,6 +2,7 @@
#define MALLOC_IMPL_H
#include <sys/mman.h>
#include "pthread.h"
#include "malloc_config.h"
hidden void *__expand_heap(size_t *);
......@@ -10,11 +11,23 @@ hidden void __malloc_donate(char *, char *);
hidden void *__memalign(size_t, size_t);
typedef struct occupied_bin_s {
struct chunk *head, *tail;
volatile int lock[2];
} occupied_bin_t;
struct chunk {
size_t psize, csize;
#ifdef MUSL_ITERATE_AND_STATS_API
occupied_bin_t *bin;
#endif
#ifdef MALLOC_RED_ZONE
size_t usize;
size_t state;
#endif
#ifdef MUSL_ITERATE_AND_STATS_API
struct chunk *next_occupied, *prev_occupied;
#endif
struct chunk *next, *prev;
};
......@@ -28,17 +41,43 @@ struct bin {
#endif
};
#ifdef MUSL_ITERATE_AND_STATS_API
typedef void (*malloc_iterate_callback)(void* base, size_t size, void* arg);
hidden occupied_bin_t *__get_occupied_bin(struct __pthread *p);
hidden occupied_bin_t *__get_current_occupied_bin();
hidden void __merge_bin_chunks(occupied_bin_t *target_bin, occupied_bin_t *source_bin);
hidden void __init_occupied_bin_key_once(void);
hidden void __push_chunk(struct chunk *c);
hidden void __pop_chunk(struct chunk *c);
hidden occupied_bin_t *__get_detached_occupied_bin(void);
hidden pthread_key_t __get_occupied_bin_key(void);
hidden size_t __get_total_heap_space(void);
#endif
#define BINS_COUNT 64
#define SIZE_MASK (-SIZE_ALIGN)
#ifndef MALLOC_RED_ZONE
#define SIZE_ALIGN (4*sizeof(size_t))
#define OVERHEAD (2*sizeof(size_t))
#ifdef MUSL_ITERATE_AND_STATS_API
#define OCCUPIED_LIST_OVERHEAD (2*sizeof(void*))
#define ITERATE_AND_STATS_OVERHEAD (sizeof(void*) + OCCUPIED_LIST_OVERHEAD)
#else
#define ITERATE_AND_STATS_OVERHEAD (0)
#endif
#ifndef MALLOC_RED_ZONE
#define SIZE_ALIGN (8*sizeof(size_t))
#define OVERHEAD (4*sizeof(size_t))
#define OVERHEAD (2*sizeof(size_t) + ITERATE_AND_STATS_OVERHEAD)
#else
#define SIZE_ALIGN (16*sizeof(size_t))
#define OVERHEAD (4*sizeof(size_t) + ITERATE_AND_STATS_OVERHEAD)
#endif
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#ifndef MALLOC_RED_ZONE
#define DONTCARE 16
#define DONTCARE OVERHEAD
#else
#define DONTCARE OVERHEAD
#define POINTER_USAGE (2*sizeof(void *))
......
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "malloc_impl.h"
#include "malloc_random.h"
#include <sys/prctl.h>
#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif
#ifdef MUSL_ITERATE_AND_STATS_API
pthread_key_t occupied_bin_key;
occupied_bin_t detached_occupied_bin;
/* Usable memory only, excluding overhead for chunks */
size_t total_heap_space = 0;
volatile int total_heap_space_inc_lock[2];
volatile int pop_merge_lock[2];
occupied_bin_t *__get_detached_occupied_bin(void) {
return &detached_occupied_bin;
}
pthread_key_t __get_detached_occupied_bin_key(void) {
return occupied_bin_key;
}
size_t __get_total_heap_space(void) {
return total_heap_space;
}
static pthread_once_t occupied_bin_key_is_initialized = PTHREAD_ONCE_INIT;
static void occupied_bin_destructor(void *occupied_bin)
{
internal_free(occupied_bin);
}
static void init_occupied_bin_key(void)
{
pthread_key_create(&occupied_bin_key, occupied_bin_destructor);
}
void __init_occupied_bin_key_once(void)
{
pthread_once(&occupied_bin_key_is_initialized, init_occupied_bin_key);
}
occupied_bin_t *__get_occupied_bin(struct __pthread *p)
{
__init_occupied_bin_key_once();
return p->tsd[occupied_bin_key];
}
occupied_bin_t *__get_current_occupied_bin()
{
return __get_occupied_bin(__pthread_self());
}
pthread_key_t __get_occupied_bin_key() {
return occupied_bin_key;
}
#endif
#ifdef HOOK_ENABLE
void *__libc_malloc(size_t);
void __libc_free(void *p);
......@@ -23,14 +77,14 @@ void __libc_free(void *p);
static struct {
volatile uint64_t binmap;
struct bin bins[64];
struct bin bins[BINS_COUNT];
volatile int free_lock[2];
#ifdef MALLOC_FREELIST_QUARANTINE
struct bin quarantine[QUARANTINE_NUM];
size_t quarantined_count[QUARANTINE_NUM];
size_t quarantined_size[QUARANTINE_NUM];
#ifdef MALLOC_RED_ZONE
char poison[64];
char poison[BINS_COUNT];
volatile int poison_lock[2];
int poison_count_down;
#endif
......@@ -65,6 +119,174 @@ static inline void unlock(volatile int *lk)
}
}
#ifdef MUSL_ITERATE_AND_STATS_API
void __merge_bin_chunks(occupied_bin_t *target_bin, occupied_bin_t *source_bin)
{
if (!libc.initialized) {
return;
}
lock(pop_merge_lock);
lock(target_bin->lock);
lock(source_bin->lock);
if (target_bin->head == NULL) {
target_bin->head = source_bin->head;
target_bin->tail = source_bin->tail;
} else {
target_bin->tail->next_occupied = source_bin->head;
if (source_bin->head != NULL) {
source_bin->head->prev_occupied = target_bin->tail;
target_bin->tail = source_bin->tail;
}
}
for (struct chunk *c = source_bin->head; c != NULL; c = c->next_occupied) {
c->bin = target_bin;
}
unlock(source_bin->lock);
unlock(target_bin->lock);
unlock(pop_merge_lock);
}
void __push_chunk(struct chunk *c)
{
c->prev_occupied = c->next_occupied = NULL;
c->bin = NULL;
if (!libc.initialized) {
return;
}
occupied_bin_t *occupied_bin = __get_current_occupied_bin();
c->bin = occupied_bin;
if (c->bin == NULL) {
return;
}
lock(occupied_bin->lock);
if (occupied_bin->head != NULL) {
occupied_bin->head->prev_occupied = c;
c->next_occupied = occupied_bin->head;
} else {
occupied_bin->tail = c;
}
occupied_bin->head = c;
unlock(occupied_bin->lock);
}
void __pop_chunk(struct chunk *c)
{
if (!libc.initialized) {
return;
}
lock(pop_merge_lock);
occupied_bin_t *occupied_bin = c->bin;
if (occupied_bin == NULL) {
unlock(pop_merge_lock);
return;
}
lock(occupied_bin->lock);
if (c == occupied_bin->head) {
occupied_bin->head = c->next_occupied;
} else {
c->prev_occupied->next_occupied = c->next_occupied;
}
if (c == occupied_bin->tail) {
occupied_bin->tail = c->prev_occupied;
} else {
c->next_occupied->prev_occupied = c->prev_occupied;
}
unlock(occupied_bin->lock);
unlock(pop_merge_lock);
}
#endif
void malloc_disable(void)
{
#ifdef MUSL_ITERATE_AND_STATS_API
lock(mal.free_lock);
lock(total_heap_space_inc_lock);
for (size_t i = 0; i < BINS_COUNT; ++i) {
lock(mal.bins[i].lock);
}
__tl_lock();
struct __pthread *self, *it;
self = it = __pthread_self();
do {
occupied_bin_t *occupied_bin = __get_occupied_bin(it);
lock(occupied_bin->lock);
it = it->next;
} while (it != self);
lock(detached_occupied_bin.lock);
#endif
}
void malloc_enable(void)
{
#ifdef MUSL_ITERATE_AND_STATS_API
struct __pthread *self, *it;
self = it = __pthread_self();
do {
occupied_bin_t *occupied_bin = __get_occupied_bin(it);
unlock(occupied_bin->lock);
it = it->next;
} while (it != self);
unlock(detached_occupied_bin.lock);
__tl_unlock();
for (size_t i = 0; i < BINS_COUNT; ++i) {
unlock(mal.bins[i].lock);
}
unlock(total_heap_space_inc_lock);
unlock(mal.free_lock);
#endif
}
#ifdef MUSL_ITERATE_AND_STATS_API
typedef struct iterate_info_s {
uintptr_t start_ptr;
uintptr_t end_ptr;
malloc_iterate_callback callback;
void *arg;
} iterate_info_t;
static void malloc_iterate_visitor(void *block, size_t block_size, void *arg)
{
iterate_info_t *iterate_info = (iterate_info_t *)arg;
if ((uintptr_t)block >= iterate_info->start_ptr && (uintptr_t)block < iterate_info->end_ptr) {
iterate_info->callback(block, block_size, iterate_info->arg);
}
}
static void malloc_iterate_occupied_bin(occupied_bin_t *occupied_bin, iterate_info_t *iterate_info)
{
for (struct chunk *c = occupied_bin->head; c != NULL; c = c->next_occupied) {
malloc_iterate_visitor(CHUNK_TO_MEM(c), CHUNK_SIZE(c) - OVERHEAD, iterate_info);
}
}
#endif
int malloc_iterate(void* base, size_t size, void (*callback)(void* base, size_t size, void* arg), void* arg)
{
#ifdef MUSL_ITERATE_AND_STATS_API
uintptr_t ptr = (uintptr_t)base;
uintptr_t end_ptr = ptr + size;
iterate_info_t iterate_info = {ptr, end_ptr, callback, arg};
struct __pthread *self, *it;
self = it = __pthread_self();
do {
occupied_bin_t *occupied_bin = __get_occupied_bin(it);
malloc_iterate_occupied_bin(occupied_bin, &iterate_info);
it = it->next;
} while (it != self);
malloc_iterate_occupied_bin(&detached_occupied_bin, &iterate_info);
#endif
return 0;
}
static inline void lock_bin(int i)
{
lock(mal.bins[i].lock);
......@@ -228,7 +450,7 @@ void __dump_heap(int x)
c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
c->csize & 15,
NEXT_CHUNK(c)->psize & 15);
for (i=0; i<64; i++) {
for (i=0; i<BINS_COUNT; i++) {
if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
if (!(mal.binmap & 1ULL<<i))
......@@ -253,8 +475,15 @@ static struct chunk *expand_heap(size_t n)
lock(heap_lock);
#ifdef MUSL_ITERATE_AND_STATS_API
lock(total_heap_space_inc_lock);
#endif
p = __expand_heap(&n);
if (!p) {
#ifdef MUSL_ITERATE_AND_STATS_API
unlock(total_heap_space_inc_lock);
#endif
unlock(heap_lock);
return 0;
}
......@@ -285,6 +514,11 @@ static struct chunk *expand_heap(size_t n)
chunk_checksum_set(w);
#endif
#ifdef MUSL_ITERATE_AND_STATS_API
total_heap_space += n - OVERHEAD;
unlock(total_heap_space_inc_lock);
#endif
unlock(heap_lock);
return w;
......@@ -473,6 +707,7 @@ void *internal_malloc(size_t n)
c = (void *)(base + SIZE_ALIGN - OVERHEAD);
c->csize = len - (SIZE_ALIGN - OVERHEAD);
c->psize = SIZE_ALIGN - OVERHEAD;
#ifdef MALLOC_RED_ZONE
c->state = M_STATE_MMAP | M_STATE_USED;
c->usize = user_size;
......@@ -480,6 +715,9 @@ void *internal_malloc(size_t n)
chunk_poison_set(c);
}
chunk_checksum_set(c);
#endif
#ifdef MUSL_ITERATE_AND_STATS_API
__push_chunk(c);
#endif
return CHUNK_TO_MEM(c);
}
......@@ -531,6 +769,9 @@ void *internal_malloc(size_t n)
c->state &= ~M_RZ_POISON;
}
chunk_checksum_set(c);
#endif
#ifdef MUSL_ITERATE_AND_STATS_API
__push_chunk(c);
#endif
return CHUNK_TO_MEM(c);
}
......@@ -1044,6 +1285,9 @@ void internal_free(void *p)
if (!p) return;
struct chunk *self = MEM_TO_CHUNK(p);
#ifdef MUSL_ITERATE_AND_STATS_API
__pop_chunk(self);
#endif
#ifdef MALLOC_RED_ZONE
/* This is not a valid chunk for freeing */
......@@ -1083,6 +1327,23 @@ void __malloc_donate(char *start, char *end)
c->usize = POINTER_USAGE;
c->state = M_STATE_BRK;
chunk_checksum_set(c);
#endif
#ifdef MUSL_ITERATE_AND_STATS_API
lock(total_heap_space_inc_lock);
total_heap_space += CHUNK_SIZE(c) - OVERHEAD;
#endif
__bin_chunk(c);
#ifdef MUSL_ITERATE_AND_STATS_API
unlock(total_heap_space_inc_lock);
#endif
}
int mallopt(int param, int value)
{
return 0;
}
ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count)
{
return 0;
}
......@@ -30,6 +30,11 @@ void *__memalign(size_t align, size_t len)
struct chunk *c = MEM_TO_CHUNK(mem);
struct chunk *n = MEM_TO_CHUNK(new);
#ifdef MUSL_ITERATE_AND_STATS_API
__pop_chunk(c);
__push_chunk(n);
#endif
if (IS_MMAPPED(c)) {
/* Apply difference between aligned and original
* address to the "extra" field of mmapped chunk.
......
/*
* Copyright (C) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdarg.h>
#include <malloc.h>
#include <errno.h>
#include <string.h>
#include "pthread_impl.h"
#include "malloc_impl.h"
#ifdef MUSL_ITERATE_AND_STATS_API
#define STAT_PRINTF_MAX_LEN 255
#define ALLOCATOR_VERSION 1
#define SEPARATOR_REPEATS 7
typedef void (write_cb_fun)(void *, const char *);
typedef enum {
TABLE, XML
} print_mode;
typedef struct {
size_t mmapped_regions;
size_t total_mmapped_memory;
size_t total_allocated_memory;
size_t total_allocated_heap_space;
} malloc_stats_t;
static void stat_printf(write_cb_fun *write_cb, void *write_cb_arg, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
char buf[STAT_PRINTF_MAX_LEN + 1];
if (vsnprintf(buf, STAT_PRINTF_MAX_LEN, fmt, args)) {
write_cb(write_cb_arg, buf);
} else {
fprintf(stderr, "Error writing to buffer");
}
va_end(args);
}
static void print_thread_stats_table(
write_cb_fun *write_cb,
void *write_cb_arg,
struct __pthread *thread,
malloc_stats_t *stats
)
{
stat_printf(
write_cb,
write_cb_arg,
"%-11d %-23zu %-20zu %-20zu\n",
thread->tid,
stats->total_allocated_memory,
stats->total_mmapped_memory,
stats->mmapped_regions
);
}
static void print_amount_xml(write_cb_fun *write_cb, void *write_cb_arg, const char *name, size_t value)
{
stat_printf(write_cb, write_cb_arg, "<%s>%zu</%s>\n", name, value, name);
}
static void print_thread_specific_amounts_xml(write_cb_fun *write_cb, void *write_cb_arg, malloc_stats_t *stats)
{
print_amount_xml(write_cb, write_cb_arg, "total_allocated_memory", stats->total_allocated_memory);
print_amount_xml(write_cb, write_cb_arg, "total_mmapped_memory", stats->total_mmapped_memory);
print_amount_xml(write_cb, write_cb_arg, "mmapped_regions", stats->mmapped_regions);
}
static void print_thread_stats_xml(
write_cb_fun *write_cb,
void *write_cb_arg,
struct __pthread *thread,
malloc_stats_t *stats
)
{
stat_printf(write_cb, write_cb_arg, "<thread id=\"%d\">\n", thread->tid);
print_thread_specific_amounts_xml(write_cb, write_cb_arg, stats);
stat_printf(write_cb, write_cb_arg, "</thread>\n");
}
static malloc_stats_t add_up_chunks(occupied_bin_t *occupied_bin)
{
malloc_stats_t stats = {0, 0, 0, 0};
for (struct chunk *c = occupied_bin->head; c != NULL; c = c->next_occupied) {
size_t chunk_memory = CHUNK_SIZE(c) - OVERHEAD;
stats.total_allocated_memory += chunk_memory;
if (IS_MMAPPED(c)) {
stats.mmapped_regions++;
stats.total_mmapped_memory += chunk_memory;
} else {
stats.total_allocated_heap_space += chunk_memory;
}
}
return stats;
}
static size_t print_threads(write_cb_fun *write_cb, void *write_cb_arg, print_mode mode)
{
size_t total_allocated_heap_space = 0;
struct __pthread *self, *it;
self = it = __pthread_self();
do {
malloc_stats_t stats = add_up_chunks(__get_occupied_bin(it));
total_allocated_heap_space += stats.total_allocated_heap_space;
if (mode == TABLE) {
print_thread_stats_table(write_cb, write_cb_arg, it, &stats);
} else {
print_thread_stats_xml(write_cb, write_cb_arg, it, &stats);
}
it = it->next;
} while (it != self);
return total_allocated_heap_space;
}
static void print_abandoned_stats_table(write_cb_fun *write_cb, void *write_cb_arg, malloc_stats_t *stats)
{
stat_printf(
write_cb,
write_cb_arg,
"%s\n%-11s %-23zu %-20zu %-20zu\n",
"---------",
"abandoned",
stats->total_allocated_memory,
stats->total_mmapped_memory,
stats->mmapped_regions
);
}
static void print_abandoned_stats_xml(write_cb_fun *write_cb, void *write_cb_arg, malloc_stats_t *stats)
{
stat_printf(write_cb, write_cb_arg, "<abandoned>\n");
print_thread_specific_amounts_xml(write_cb, write_cb_arg, stats);
stat_printf(write_cb, write_cb_arg, "</abandoned>\n");
}
static size_t print_abandoned(write_cb_fun *write_cb, void *write_cb_arg, print_mode mode)
{
malloc_stats_t stats = add_up_chunks(__get_detached_occupied_bin());
if (mode == TABLE) {
print_abandoned_stats_table(write_cb, write_cb_arg, &stats);
} else {
print_abandoned_stats_xml(write_cb, write_cb_arg, &stats);
}
return stats.total_allocated_heap_space;
}
static void print_total_free_heap_space(
write_cb_fun *write_cb,
void *write_cb_arg,
size_t total_allocated_heap_space,
print_mode mode
)
{
if (mode == TABLE) {
stat_printf(write_cb, write_cb_arg, "\n");
for (size_t i = 0; i < SEPARATOR_REPEATS; i++) {
stat_printf(
write_cb,
write_cb_arg,
"-----------"
);
}
stat_printf(
write_cb,
write_cb_arg,
"\ntotal free heap space: %zu\n",
__get_total_heap_space() - total_allocated_heap_space
);
} else {
print_amount_xml(
write_cb,
write_cb_arg,
"total_free_heap_space",
__get_total_heap_space() - total_allocated_heap_space
);
}
}
static void print_to_file(void *fp, const char *s)
{
if (fputs(s, fp) == EOF) {
fprintf(stderr, "Error writing to file stream: %s", strerror(errno));
}
}
static void add_stats(malloc_stats_t *destination, const malloc_stats_t *source)
{
destination->total_allocated_memory += source->total_allocated_memory;
destination->total_mmapped_memory += source->total_mmapped_memory;
destination->mmapped_regions += source->mmapped_regions;
destination->total_allocated_heap_space += source->total_allocated_heap_space;
}
#endif
int malloc_info(int options, FILE* fp)
{
#ifdef MUSL_ITERATE_AND_STATS_API
if (options != 0) {
errno = EINVAL;
return -1;
}
malloc_disable();
stat_printf(print_to_file, fp, "<?xml version=\"1.0\"?>\n");
stat_printf(print_to_file, fp, "<malloc version=\"%d\">\n", ALLOCATOR_VERSION);
stat_printf(print_to_file, fp, "<threads>\n");
size_t total_allocated_heap_space = print_threads(print_to_file, fp, XML);
stat_printf(print_to_file, fp, "</threads>\n");
total_allocated_heap_space += print_abandoned(print_to_file, fp, XML);
print_total_free_heap_space(print_to_file, fp, total_allocated_heap_space, XML);
stat_printf(print_to_file, fp, "</malloc>\n");
malloc_enable();
#endif
return 0;
}
void malloc_stats_print(void (*write_cb) (void *, const char *), void *cbopaque, const char *opts)
{
#ifdef MUSL_ITERATE_AND_STATS_API
malloc_disable();
stat_printf(
write_cb,
cbopaque,
"%-11s %-23s %-20s %-20s\n",
"thread_id",
"total_allocated_memory",
"total_mmapped_memory",
"mmapped_regions"
);
size_t total_allocated_heap_space = print_threads(write_cb, cbopaque, TABLE);
total_allocated_heap_space += print_abandoned(write_cb, cbopaque, TABLE);
print_total_free_heap_space(write_cb, cbopaque, total_allocated_heap_space, TABLE);
malloc_enable();
#endif
}
struct mallinfo2 mallinfo2(void)
{
#ifdef MUSL_ITERATE_AND_STATS_API
malloc_disable();
malloc_stats_t shared_stats = {0, 0, 0, 0};
struct __pthread *self, *it;
self = it = __pthread_self();
do {
malloc_stats_t stats = add_up_chunks(__get_occupied_bin(it));
add_stats(&shared_stats, &stats);
it = it->next;
} while (it != self);
malloc_stats_t abandoned_stats = add_up_chunks(__get_detached_occupied_bin());
add_stats(&shared_stats, &abandoned_stats);
struct mallinfo2 res = {
.hblks = shared_stats.mmapped_regions,
.hblkhd = shared_stats.total_mmapped_memory,
.uordblks = shared_stats.total_allocated_memory,
.fordblks = __get_total_heap_space() - shared_stats.total_allocated_heap_space
};
malloc_enable();
return res;
#endif
return (struct mallinfo2){};
}
struct mallinfo mallinfo(void)
{
struct mallinfo2 mallinfo2_res = mallinfo2();
return (struct mallinfo) {
.hblks = (int) mallinfo2_res.hblks,
.hblkhd = (int) mallinfo2_res.hblkhd,
.uordblks = (int) mallinfo2_res.uordblks,
.fordblks = (int) mallinfo2_res.fordblks,
};
}
......@@ -4,6 +4,7 @@
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include "malloc_impl.h"
#include <sys/mman.h>
#include <sys/prctl.h>
#include <string.h>
......@@ -145,8 +146,6 @@ _Noreturn void __pthread_exit(void *result)
f(x);
}
__pthread_tsd_run_dtors();
/* Access to target the exiting thread with syscalls that use
* its kernel tid is controlled by killlock. For detached threads,
* any use past this point would have undefined behavior, but for
......@@ -158,6 +157,13 @@ _Noreturn void __pthread_exit(void *result)
__block_app_sigs(&set);
__tl_lock();
#ifdef MUSL_ITERATE_AND_STATS_API
occupied_bin_t *self_tsd = __get_occupied_bin(self);
__merge_bin_chunks(__get_detached_occupied_bin(), self_tsd);
#endif
__pthread_tsd_run_dtors();
#ifdef RESERVE_SIGNAL_STACK
__pthread_release_signal_stack();
#endif
......@@ -400,6 +406,15 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
new->CANARY = self->CANARY;
new->sysinfo = self->sysinfo;
#ifdef MUSL_ITERATE_AND_STATS_API
/* Initialize malloc tsd */
__init_occupied_bin_key_once();
occupied_bin_t *occupied_bin = internal_calloc(sizeof(occupied_bin_t), 1);
if (occupied_bin == NULL) goto fail;
new->tsd[__get_occupied_bin_key()] = occupied_bin;
new->tsd_used = 1;
#endif
/* Setup argument structure for the new thread on its stack.
* It's safe to access from the caller only until the thread
* list is unlocked. */
......@@ -472,7 +487,7 @@ weak_alias(__pthread_create, pthread_create);
struct pthread* __pthread_list_find(pthread_t thread_id, const char* info)
{
struct pthread *thread = (struct pthread *)thread_id;
struct pthread *thread = (struct pthread *)thread_id;
if (NULL == thread) {
log_print("invalid pthread_t (0) passed to %s\n", info);
return NULL;
......@@ -488,7 +503,7 @@ struct pthread* __pthread_list_find(pthread_t thread_id, const char* info)
if (t == thread) return thread;
t = t->next ;
}
log_print("invalid pthread_t %p passed to %s\n", thread, info);
log_print("invalid pthread_t %p passed to %s\n", thread, info);
return NULL;
}
......
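
Finally, a sketch of the expected malloc_iterate usage pattern. Unlike malloc_info and malloc_stats_print, which bracket their walk with malloc_disable()/malloc_enable() internally, malloc_iterate in this patch takes no locks of its own, so a caller would presumably pause the allocator around the walk. The callback and helper names below are illustrative, and the callback deliberately avoids allocating while the heap is disabled.

#include <malloc.h>
#include <stddef.h>

/* Invoked once per live allocation whose start falls inside [base, base + size). */
static void count_chunk(void *block, size_t block_size, void *arg)
{
    size_t *total = (size_t *)arg;
    *total += block_size;
}

size_t live_bytes_in_range(void *base, size_t size)
{
    size_t total = 0;

    malloc_disable();    /* take the allocator locks so the chunk lists stay stable */
    malloc_iterate(base, size, count_chunk, &total);
    malloc_enable();     /* release the locks and resume normal allocation */

    return total;
}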