Commit 65228e15 authored by Far

feat: add red zone/poison support to strengthen the musl memory allocator's defences against overflow and UAF

1. Two fields, usize and state, are added to the chunk overhead area: usize records the size of the payload actually in use, and state records the current state of the chunk, namely whether it is allocated to the user and whether it has been poisoned. Poisoning means filling the chunk memory outside the valid payload (the memory the user actually uses) with randomly generated data; checking these regions on malloc/free then detects both overflows and use-after-free.

2. For performance, not every chunk is poisoned; instead, poisoning is performed once every POISON_COUNT_DOWN_BASE malloc/free operations.
Signed-off-by: Far <yesiyuan2@huawei.com>
Change-Id: Idb341c202d8ec99f5370d4f589ee261ded8b163f
Parent f451e8a1
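As a rough illustration of point 1 (a minimal standalone sketch, not the allocator code: struct hdr, poison_slack, check_slack and the fixed 0xab poison byte are invented for this example, whereas the real implementation keeps a per-bin poison value derived from a random key):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical header mirroring the new overhead fields: usize records the
 * requested payload size, state would record allocation/poison flags. */
struct hdr {
    size_t csize;   /* total chunk size, header included */
    size_t usize;   /* bytes actually requested by the user */
    size_t state;   /* used/poisoned flags (unused in this sketch) */
};

#define POISON 0xab  /* fixed here; the allocator derives it from a random key */

/* Fill the slack between the end of the payload and the end of the chunk. */
static void poison_slack(struct hdr *h, unsigned char *payload)
{
    memset(payload + h->usize, POISON, h->csize - sizeof(*h) - h->usize);
}

/* Return 0 if the red zone is intact, -1 if any byte was overwritten. */
static int check_slack(const struct hdr *h, const unsigned char *payload)
{
    for (size_t i = h->usize; i < h->csize - sizeof(*h); ++i) {
        if (payload[i] != POISON)
            return -1;
    }
    return 0;
}

int main(void)
{
    union { struct hdr h; unsigned char bytes[64]; } chunk;
    unsigned char *payload = chunk.bytes + sizeof(struct hdr);

    chunk.h.csize = sizeof(chunk.bytes);
    chunk.h.usize = 5;                 /* the caller asked for 5 bytes */
    poison_slack(&chunk.h, payload);

    payload[5] = 0x42;                 /* one-byte overflow into the red zone */
    printf("red zone intact: %s\n", check_slack(&chunk.h, payload) ? "no" : "yes");
    return 0;
}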
/*
* Copyright (C) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include "test.h"
#define ALIGNED_SIZE (4 * sizeof(size_t))
#define OVERFLOW_VAL 0xab
#define LOOP_SIZE 512
#define MALLOC_TIME 67
static void handler(int s)
{
}

static int child(void)
{
    char *ptr[MALLOC_TIME];

    for (int i = 0; i < LOOP_SIZE; ++i) {
        for (int j = 0; j < MALLOC_TIME; ++j) {
            ptr[j] = (char *)malloc(ALIGNED_SIZE - 1);
            if (!ptr[j]) {
                t_error("Malloc failed: %s\n", strerror(errno));
                return -1;
            }
            /* Write one byte past the requested size, into the red zone */
            ptr[j][ALIGNED_SIZE - 1] = (char)(OVERFLOW_VAL - j);
        }
        for (int j = 0; j < MALLOC_TIME; ++j) {
            free(ptr[j]);
        }
    }
    return 0;
}
static pid_t start_child(void)
{
    pid_t pid = 0;
    int ret = 0;

    pid = fork();
    if (pid == 0) {
        ret = child();
        t_error("child process exited normally with %d\n", ret);
        /* Exit here so the child never falls through into the parent logic in main */
        exit(ret);
    }
    return pid;
}
int main(int argc, char *argv[])
{
    sigset_t set;
    int status = 0;
    pid_t pid = 0;
    int flag = 0;
    char *pname = (argc > 0) ? argv[0] : "malloc-overflow-check";

    sigemptyset(&set);
    sigaddset(&set, SIGCHLD);
    sigprocmask(SIG_BLOCK, &set, 0);
    signal(SIGCHLD, handler);

    pid = start_child();
    if (pid == -1) {
        t_error("%s fork failed: %s\n", pname, strerror(errno));
        return -1;
    }
    if (sigtimedwait(&set, 0, &(struct timespec){5, 0}) == -1) { /* Wait for 5 seconds */
        if (errno == EAGAIN)
            flag = 1;
        else
            t_error("%s sigtimedwait failed: %s\n", pname, strerror(errno));
        if (kill(pid, SIGKILL) == -1)
            t_error("%s kill failed: %s\n", pname, strerror(errno));
    }
    if (waitpid(pid, &status, 0) != pid) {
        t_error("%s waitpid failed: %s\n", pname, strerror(errno));
        return -1;
    }
    if (flag) {
        t_error("Child process timed out\n");
    }
    if (WIFSIGNALED(status)) {
        if (WTERMSIG(status) != SIGSEGV && WTERMSIG(status) != SIGILL) {
            t_error("%s child process terminated with %s\n", pname, strsignal(WTERMSIG(status)));
            return -1;
        }
    } else {
        t_error("%s child process finished normally\n", pname);
    }
    return t_status;
}
/*
* Copyright (C) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include "test.h"
#define ALIGNED_SIZE (8 * sizeof(size_t))
#define POINTER_USAGE (2 * sizeof(void *))
#define UAF_VAL 0xab
#define LOOP_SIZE 512
#define MALLOC_TIME 67
static void handler(int s)
{
}

static int child(void)
{
    char *ptr[MALLOC_TIME];
    char *ptr1[MALLOC_TIME];
    char *divide[MALLOC_TIME];

    for (int i = 0; i < LOOP_SIZE; ++i) {
        for (int j = 0; j < MALLOC_TIME; ++j) {
            ptr[j] = (char *)malloc(ALIGNED_SIZE - 1);
            if (!ptr[j]) {
                t_error("Malloc failed: %s\n", strerror(errno));
                return -1;
            }
            divide[j] = (char *)malloc(ALIGNED_SIZE - 1);
            if (!divide[j]) {
                t_error("Malloc divide failed: %s\n", strerror(errno));
                return -1;
            }
        }
        for (int j = 0; j < MALLOC_TIME; ++j) {
            free(ptr[j]);
            /* Use after free: write past the bin/quarantine list pointers at the
             * start of the freed chunk so only the poisoned area is touched */
            ptr[j][POINTER_USAGE] = (char)(UAF_VAL - j);
        }
        for (int j = 0; j < MALLOC_TIME; ++j) {
            ptr1[j] = (char *)malloc(ALIGNED_SIZE - 1);
            if (!ptr1[j]) {
                t_error("Malloc failed: %s\n", strerror(errno));
                return -1;
            }
        }
        for (int j = 0; j < MALLOC_TIME; ++j) {
            free(divide[j]);
            divide[j][POINTER_USAGE] = (char)(UAF_VAL - j);
        }
        for (int j = 0; j < MALLOC_TIME; ++j) {
            free(ptr1[j]);
            ptr1[j][POINTER_USAGE] = (char)(UAF_VAL - j);
        }
    }
    return 0;
}
static pid_t start_child(void)
{
    pid_t pid = 0;
    int ret = 0;

    pid = fork();
    if (pid == 0) {
        ret = child();
        t_error("child process exited normally with %d\n", ret);
        /* Exit here so the child never falls through into the parent logic in main */
        exit(ret);
    }
    return pid;
}
int main(int argc, char *argv[])
{
    sigset_t set;
    int status = 0;
    pid_t pid = 0;
    int flag = 0;
    char *pname = (argc > 0) ? argv[0] : "malloc-uaf-check";

    sigemptyset(&set);
    sigaddset(&set, SIGCHLD);
    sigprocmask(SIG_BLOCK, &set, 0);
    signal(SIGCHLD, handler);

    pid = start_child();
    if (pid == -1) {
        t_error("%s fork failed: %s\n", pname, strerror(errno));
        return -1;
    }
    if (sigtimedwait(&set, 0, &(struct timespec){5, 0}) == -1) { /* Wait for 5 seconds */
        if (errno == EAGAIN)
            flag = 1;
        else
            t_error("%s sigtimedwait failed: %s\n", pname, strerror(errno));
        if (kill(pid, SIGKILL) == -1)
            t_error("%s kill failed: %s\n", pname, strerror(errno));
    }
    if (waitpid(pid, &status, 0) != pid) {
        t_error("%s waitpid failed: %s\n", pname, strerror(errno));
        return -1;
    }
    if (flag) {
        t_error("Child process timed out\n");
    }
    if (WIFSIGNALED(status)) {
        if (WTERMSIG(status) != SIGSEGV && WTERMSIG(status) != SIGILL) {
            t_error("%s child process terminated with %s\n", pname, strsignal(WTERMSIG(status)));
            return -1;
        }
    } else {
        t_error("%s child process finished normally\n", pname);
    }
    return t_status;
}
import("../../../musl_config.gni")
regression_list = [
"daemon-failure",
"dn_expand-empty",
@@ -77,3 +79,10 @@ regression_list = [
"wcsncpy-read-overflow",
"wcsstr-false-negative",
]
if (musl_secure_level >= 3) {
regression_list += [
"malloc-overflow-check",
"malloc-uaf-check",
]
}
@@ -314,6 +314,9 @@ template("musl_libs") {
if (musl_secure_level > 1) {
defines += [ "MALLOC_FREELIST_QUARANTINE" ]
}
if (musl_secure_level > 2) {
defines += [ "MALLOC_RED_ZONE" ]
}
if (is_debug || musl_secure_level >= 3) {
defines += [ "MALLOC_SECURE_ALL" ]
}
@@ -25,10 +25,18 @@
#define MALLOC_FREELIST_QUARANTINE
#endif
#ifndef MALLOC_RED_ZONE
#define MALLOC_RED_ZONE
#endif
#endif // MALLOC_SECURE_ALL
#if defined(MALLOC_FREELIST_QUARANTINE) && !defined(MALLOC_FREELIST_HARDENED)
#error MALLOC_FREELIST_QUARANTINE can be only applied when MALLOC_FREELIST_HARDENED is set.
#endif
#if defined(MALLOC_RED_ZONE) && !defined(MALLOC_FREELIST_QUARANTINE)
#error MALLOC_RED_ZONE can be only applied when MALLOC_FREELIST_QUARANTINE is set.
#endif
#endif // MALLOC_CONFIG_H
@@ -12,6 +12,10 @@ hidden void *__memalign(size_t, size_t);
struct chunk {
size_t psize, csize;
#ifdef MALLOC_RED_ZONE
size_t usize;
size_t state;
#endif
struct chunk *next, *prev;
};
@@ -24,11 +28,21 @@ struct bin {
#endif
};
#define SIZE_ALIGN (4*sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#ifndef MALLOC_RED_ZONE
#define SIZE_ALIGN (4*sizeof(size_t))
#define OVERHEAD (2*sizeof(size_t))
#else
#define SIZE_ALIGN (8*sizeof(size_t))
#define OVERHEAD (4*sizeof(size_t))
#endif
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#ifndef MALLOC_RED_ZONE
#define DONTCARE 16
#else
#define DONTCARE OVERHEAD
#define POINTER_USAGE (2*sizeof(void *))
#endif
#define RECLAIM 163840
#ifdef MALLOC_FREELIST_QUARANTINE
@@ -50,6 +64,21 @@ struct bin {
#define QUARANTINE_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.quarantine[i].head))
#endif
#ifdef MALLOC_RED_ZONE
#define M_STATE_FREE 0x00
#define M_STATE_USED 0x01U
#define M_STATE_BRK 0x02U
#define M_STATE_MMAP 0x04U
#define M_RZ_NONE 0x00
#define M_RZ_POISON 0x10U
#define M_STATE_MASK 0xffU
#define M_CHECKSUM_SHIFT 8
#define POISON_PERIOD 31
#endif
#define C_INUSE ((size_t)1)
#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
@@ -66,4 +95,10 @@ hidden void *internal_calloc(size_t m, size_t n);
hidden void *internal_realloc(void *p, size_t n);
#ifdef MALLOC_RED_ZONE
hidden void chunk_checksum_set(struct chunk *c);
hidden int chunk_checksum_check(struct chunk *c);
#endif
#endif
@@ -29,6 +29,11 @@ static struct {
struct bin quarantine[QUARANTINE_NUM];
size_t quarantined_count[QUARANTINE_NUM];
size_t quarantined_size[QUARANTINE_NUM];
#ifdef MALLOC_RED_ZONE
char poison[64];
volatile int poison_lock[2];
int poison_count_down;
#endif
#endif
} mal;
@@ -146,6 +151,73 @@ static int bin_index_up(size_t x)
return bin_tab[x/128-4] + 17;
}
#ifdef MALLOC_RED_ZONE
static inline size_t chunk_checksum_calculate(struct chunk *c)
{
return (((size_t)c) ^ c->csize ^ c->usize ^ (c->state & M_STATE_MASK)) << M_CHECKSUM_SHIFT;
}
void chunk_checksum_set(struct chunk *c)
{
c->state = (c->state & M_STATE_MASK) | chunk_checksum_calculate(c);
}
int chunk_checksum_check(struct chunk *c)
{
return (c->state & ~M_STATE_MASK) ^ chunk_checksum_calculate(c);
}
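To make the encoding above concrete: the low byte of state holds the M_STATE_*/M_RZ_* flags, and the bits above M_CHECKSUM_SHIFT hold a checksum over the chunk address, csize, usize and the flag byte, so header corruption is detected the next time the chunk is touched. The standalone sketch below mimics that round trip; struct chunk_hdr and the two helpers are stand-ins written for this example (the mask is widened to size_t here for a clean full-width compare), not the musl functions themselves.

#include <stdio.h>
#include <stddef.h>

#define M_STATE_MASK     ((size_t)0xff)
#define M_CHECKSUM_SHIFT 8

struct chunk_hdr {                  /* header fields only, for illustration */
    size_t psize, csize, usize, state;
};

static size_t checksum(const struct chunk_hdr *c)
{
    return (((size_t)c) ^ c->csize ^ c->usize ^ (c->state & M_STATE_MASK))
           << M_CHECKSUM_SHIFT;
}

/* Keep the flag byte, store the checksum in the bits above it. */
static void checksum_set(struct chunk_hdr *c)
{
    c->state = (c->state & M_STATE_MASK) | checksum(c);
}

/* Non-zero means the stored checksum no longer matches the header. */
static int checksum_check(const struct chunk_hdr *c)
{
    return ((c->state ^ checksum(c)) & ~M_STATE_MASK) != 0;
}

int main(void)
{
    struct chunk_hdr c = { .psize = 0, .csize = 64, .usize = 5, .state = 0x01 };

    checksum_set(&c);
    printf("intact:   %d\n", checksum_check(&c));   /* prints 0 */

    c.csize = 128;                                  /* simulate a corrupted size field */
    printf("tampered: %d\n", checksum_check(&c));   /* prints 1 */
    return 0;
}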
static inline char get_poison(int i)
{
char poison = 0;
lock(mal.poison_lock);
if (!mal.poison[i]) {
mal.poison[i] = (char)(uintptr_t)next_key();
}
poison = mal.poison[i];
unlock(mal.poison_lock);
return poison;
}
static inline int need_poison(void)
{
int ret = 0;
lock(mal.poison_lock);
if (mal.poison_count_down == 0) {
/* Make sure the poison period is POISON_PERIOD mallocs/frees */
mal.poison_count_down = POISON_PERIOD - 1;
ret = 1;
} else {
--mal.poison_count_down;
}
unlock(mal.poison_lock);
return ret;
}
static inline void chunk_poison_set(struct chunk *c)
{
char * start = ((char *)CHUNK_TO_MEM(c)) + c->usize;
size_t size = CHUNK_SIZE(c) - OVERHEAD - c->usize;
char val = get_poison(bin_index(CHUNK_SIZE(c)));
memset(start, val, size);
c->state |= M_RZ_POISON;
}
void chunk_poison_check(struct chunk *c)
{
size_t csize = CHUNK_SIZE(c);
char poison = get_poison(bin_index(csize));
size_t padding_size = csize - OVERHEAD - c->usize;
char *start = (char *)c + OVERHEAD + c->usize;
for (size_t i = 0; i < padding_size; ++i) {
/* Poison not right, crash */
if (start[i] != poison) {
a_crash();
}
}
}
#endif
#if 0
void __dump_heap(int x)
{
@@ -207,6 +279,11 @@ static struct chunk *expand_heap(size_t n)
* zero-size sentinel header at the old end-of-heap. */
w = MEM_TO_CHUNK(p);
w->csize = n | C_INUSE;
#ifdef MALLOC_RED_ZONE
w->state = M_STATE_BRK;
w->usize = POINTER_USAGE;
chunk_checksum_set(w);
#endif
unlock(heap_lock);
@@ -225,7 +302,15 @@ static int adjust_size(size_t *n)
return 0;
}
}
#ifdef MALLOC_RED_ZONE
/*
 * *n + OVERHEAD + SIZE_ALIGN + 1 - 1,
 * i.e. round up without the usual "- 1" so that at least 1 byte is always left for the red zone
 */
*n = (*n + OVERHEAD + SIZE_ALIGN) & SIZE_MASK;
#else
*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
#endif
return 0;
}
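A small worked example of the rounding above, assuming an LP64 build where the red-zone SIZE_ALIGN is 64 and OVERHEAD is 32: with the old "+ SIZE_ALIGN - 1" rounding, a 32-byte request would land in a 64-byte chunk with zero slack left for poisoning, while dropping the "- 1" pushes such requests up one alignment step, so at least one red-zone byte always exists.

#include <stdio.h>
#include <stddef.h>

/* Red-zone build constants on LP64 (sketch; values mirror this diff) */
#define SIZE_ALIGN (8 * sizeof(size_t))   /* 64 */
#define OVERHEAD   (4 * sizeof(size_t))   /* 32 */
#define SIZE_MASK  (-SIZE_ALIGN)

int main(void)
{
    for (size_t n = 31; n <= 33; ++n) {
        /* old rounding (with "- 1"), evaluated with the same constants */
        size_t without_guarantee = (n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
        /* new rounding: always leaves at least 1 byte of red zone */
        size_t with_guarantee = (n + OVERHEAD + SIZE_ALIGN) & SIZE_MASK;
        printf("request %2zu: old chunk %3zu (slack %2zu), new chunk %3zu (slack %2zu)\n",
               n,
               without_guarantee, without_guarantee - OVERHEAD - n,
               with_guarantee, with_guarantee - OVERHEAD - n);
    }
    return 0;
}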
@@ -323,6 +408,13 @@ static int pretrim(struct chunk *self, size_t n, int i, int j)
split->csize = n1-n;
next->psize = n1-n;
self->csize = n | C_INUSE;
#ifdef MALLOC_RED_ZONE
/* Leave any existing poison bytes with the split and just clear self's tag, to keep poison operations cheap */
self->state &= ~M_RZ_POISON;
split->state = M_STATE_BRK;
split->usize = POINTER_USAGE;
chunk_checksum_set(split);
#endif
return 1;
}
@@ -340,6 +432,12 @@ static void trim(struct chunk *self, size_t n)
split->csize = n1-n | C_INUSE;
next->psize = n1-n | C_INUSE;
self->csize = n | C_INUSE;
#ifdef MALLOC_RED_ZONE
/* Remove the poison tag because the chunk is being trimmed */
split->state = M_STATE_BRK;
split->usize = POINTER_USAGE;
chunk_checksum_set(split);
#endif
__bin_chunk(split);
}
@@ -357,6 +455,10 @@ void *internal_malloc(size_t n)
{
struct chunk *c;
int i, j;
#ifdef MALLOC_RED_ZONE
size_t user_size = n;
char poison;
#endif
if (adjust_size(&n) < 0) return 0;
@@ -371,6 +473,14 @@ void *internal_malloc(size_t n)
c = (void *)(base + SIZE_ALIGN - OVERHEAD);
c->csize = len - (SIZE_ALIGN - OVERHEAD);
c->psize = SIZE_ALIGN - OVERHEAD;
#ifdef MALLOC_RED_ZONE
c->state = M_STATE_MMAP | M_STATE_USED;
c->usize = user_size;
if (need_poison()) {
chunk_poison_set(c);
}
chunk_checksum_set(c);
#endif
return CHUNK_TO_MEM(c);
}
@@ -397,6 +507,11 @@ void *internal_malloc(size_t n)
#endif
c = encode_chunk(mal.bins[j].head, key); /* Decode the head pointer */
if (c != BIN_TO_CHUNK(j)) {
#ifdef MALLOC_RED_ZONE
if (c->state & M_RZ_POISON) {
chunk_poison_check(c);
}
#endif
if (!pretrim(c, n, i, j)) unbin(c, j);
unlock_bin(j);
break;
@@ -407,6 +522,16 @@ void *internal_malloc(size_t n)
/* Now patch up in case we over-allocated */
trim(c, n);
#ifdef MALLOC_RED_ZONE
c->usize = user_size;
c->state |= M_STATE_USED;
if (need_poison()) {
chunk_poison_set(c);
} else {
c->state &= ~M_RZ_POISON;
}
chunk_checksum_set(c);
#endif
return CHUNK_TO_MEM(c);
}
@@ -461,6 +586,9 @@ void *internal_realloc(void *p, size_t n)
struct chunk *self, *next;
size_t n0, n1;
void *new;
#ifdef MALLOC_RED_ZONE
size_t user_size = n;
#endif
if (!p) return internal_malloc(n);
if (!n) {
@@ -472,6 +600,12 @@ void *internal_realloc(void *p, size_t n)
self = MEM_TO_CHUNK(p);
n1 = n0 = CHUNK_SIZE(self);
#ifdef MALLOC_RED_ZONE
/* Not a valid chunk */
if (!(self->state & M_STATE_USED)) a_crash();
if (chunk_checksum_check(self)) a_crash();
if (self->state & M_RZ_POISON) chunk_poison_check(self);
#endif
if (IS_MMAPPED(self)) {
size_t extra = self->psize;
@@ -479,6 +613,10 @@ void *internal_realloc(void *p, size_t n)
size_t oldlen = n0 + extra;
size_t newlen = n + extra;
/* Crash on realloc of freed chunk */
#ifdef MALLOC_RED_ZONE
/* Wrong malloc type */
if (!(self->state & M_STATE_MMAP)) a_crash();
#endif
if (extra & 1) a_crash();
if (newlen < PAGE_SIZE && (new = internal_malloc(n-OVERHEAD))) {
n0 = n;
@@ -491,6 +629,15 @@ void *internal_realloc(void *p, size_t n)
goto copy_realloc;
self = (void *)(base + extra);
self->csize = newlen - extra;
#ifdef MALLOC_RED_ZONE
self->usize = user_size;
if (need_poison()) {
chunk_poison_set(self);
} else {
self->state &= ~M_RZ_POISON;
}
chunk_checksum_set(self);
#endif
return CHUNK_TO_MEM(self);
}
@@ -505,6 +652,10 @@ void *internal_realloc(void *p, size_t n)
if (n > n1 && alloc_fwd(next)) {
n1 += CHUNK_SIZE(next);
next = NEXT_CHUNK(next);
#ifdef MALLOC_RED_ZONE
/* A forward merge happened, so remove the poison tag */
self->state &= ~M_RZ_POISON;
#endif
}
/* FIXME: find what's wrong here and reenable it..? */
if (0 && n > n1 && alloc_rev(self)) {
@@ -518,6 +669,15 @@ void *internal_realloc(void *p, size_t n)
if (n <= n1) {
//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
trim(self, n);
#ifdef MALLOC_RED_ZONE
self->usize = user_size;
if (need_poison()) {
chunk_poison_set(self);
} else {
self->state &= ~M_RZ_POISON;
}
chunk_checksum_set(self);
#endif
return CHUNK_TO_MEM(self);
}
@@ -526,7 +686,12 @@ copy_realloc:
new = internal_malloc(n-OVERHEAD);
if (!new) return 0;
copy_free_ret:
#ifndef MALLOC_RED_ZONE
memcpy(new, p, n0-OVERHEAD);
#else
memcpy(new, p, self->usize < user_size ? self->usize : user_size);
chunk_checksum_set(self);
#endif
internal_free(CHUNK_TO_MEM(self));
return new;
}
@@ -571,6 +736,10 @@ void __bin_chunk(struct chunk *self)
reclaim = 1;
next = NEXT_CHUNK(next);
}
#ifdef MALLOC_RED_ZONE
/* If a poisoned chunk gets merged, remove the poison tag */
self->state &= ~M_RZ_POISON;
#endif
}
if (!(mal.binmap & 1ULL<<i))
@@ -600,9 +769,15 @@ void __bin_chunk(struct chunk *self)
#else
__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
#ifdef MALLOC_RED_ZONE
self->state &= ~M_RZ_POISON;
#endif
}
#ifdef MALLOC_RED_ZONE
chunk_checksum_set(self);
#endif
unlock_bin(i);
}
@@ -611,6 +786,10 @@ static void unmap_chunk(struct chunk *self)
size_t extra = self->psize;
char *base = (char *)self - extra;
size_t len = CHUNK_SIZE(self) + extra;
#ifdef MALLOC_RED_ZONE
/* Wrong chunk type */
if (!(self->state & M_STATE_MMAP)) a_crash();
#endif
/* Crash on double free */
if (extra & 1) a_crash();
__munmap(base, len);
@@ -631,6 +810,11 @@ static void quarantine_contained(struct chunk *self)
struct chunk *prev;
void *key;
#ifdef MALLOC_RED_ZONE
/* Wrong chunk type */
if (!(self->state & M_STATE_BRK)) a_crash();
#endif
lock_quarantine(i);
key = mal.quarantine[i].key;
cur = encode_chunk(mal.quarantine[i].head, key);
@@ -657,8 +841,22 @@ static void quarantine_bin(struct chunk *self)
void *key;
int i;
#ifdef MALLOC_RED_ZONE
self->state &= ~M_STATE_USED;
#endif
/* Avoid quarantining large memory */
if (size > QUARANTINE_THRESHOLD) {
#ifdef MALLOC_RED_ZONE
self->usize = POINTER_USAGE;
if ((self->psize & self->csize & NEXT_CHUNK(self)->csize & C_INUSE) &&
need_poison()) {
/* Self will most likely not be merged with its neighbours */
chunk_poison_set(self);
} else {
self->state &= ~M_RZ_POISON;
}
chunk_checksum_set(self);
#endif
__bin_chunk(self);
return;
}
@@ -677,6 +875,9 @@ static void quarantine_bin(struct chunk *self)
/* Bin the quarantined chunk */
do {
next = encode_chunk(cur->next, key);
#ifdef MALLOC_RED_ZONE
if (cur->state & M_RZ_POISON) chunk_poison_check(cur);
#endif
__bin_chunk(cur);
cur = next;
} while (cur != QUARANTINE_TO_CHUNK(i));
@@ -692,6 +893,16 @@ static void quarantine_bin(struct chunk *self)
mal.quarantined_size[i] += size;
mal.quarantined_count[i] += 1;
#ifdef MALLOC_RED_ZONE
if (need_poison()) {
self->usize = POINTER_USAGE;
chunk_poison_set(self);
} else {
self->state &= ~M_RZ_POISON;
}
chunk_checksum_set(self);
#endif
unlock_quarantine(i);
}
#endif
@@ -711,6 +922,12 @@ void internal_free(void *p)
struct chunk *self = MEM_TO_CHUNK(p);
#ifdef MALLOC_RED_ZONE
/* This is not a valid chunk for freeing */
if (chunk_checksum_check(self)) a_crash();
if (!(self->state & M_STATE_USED)) a_crash();
if (self->state & M_RZ_POISON) chunk_poison_check(self);
#endif
if (IS_MMAPPED(self))
unmap_chunk(self);
else {
@@ -739,5 +956,10 @@ void __malloc_donate(char *start, char *end)
struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
c->psize = n->csize = C_INUSE;
c->csize = n->psize = C_INUSE | (end-start);
#ifdef MALLOC_RED_ZONE
c->usize = POINTER_USAGE;
c->state = M_STATE_BRK;
chunk_checksum_set(c);
#endif
__bin_chunk(c);
}
#include <malloc.h>
#include "malloc_impl.h"
#include "malloc_config.h"
#ifdef MALLOC_RED_ZONE
#include "atomic.h"
#endif
hidden void *(*const __realloc_dep)(void *, size_t) = realloc;
size_t malloc_usable_size(void *p)
{
#ifndef MALLOC_RED_ZONE
    return p ? CHUNK_SIZE(MEM_TO_CHUNK(p)) - OVERHEAD : 0;
#else
    struct chunk *c;

    if (!p) {
        return 0;
    }
    c = MEM_TO_CHUNK(p);
    if (chunk_checksum_check(c)) {
        a_crash();
    }
    if (!(c->state & M_STATE_USED)) {
        return 0;
    }
    return c->usize;
#endif
}
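One user-visible consequence of the change above (a hedged example, not part of the patch; exact numbers depend on the build): with MALLOC_RED_ZONE enabled, malloc_usable_size reports the size that was actually requested (usize) rather than the rounded-up chunk capacity, so callers must not rely on the historical rounding slack.

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char *p = malloc(5);
    if (!p)
        return 1;
    /* Without MALLOC_RED_ZONE this prints the rounded-up capacity;
     * with it, the patched implementation returns the requested 5 bytes. */
    printf("usable size: %zu\n", malloc_usable_size(p));
    free(p);
    return 0;
}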
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include "malloc_impl.h"
#include "malloc_config.h"
void *__memalign(size_t align, size_t len)
{
    unsigned char *mem, *new;

    if ((align & -align) != align) {
        errno = EINVAL;
        return 0;
    }
    if (len > SIZE_MAX - align || __malloc_replaced) {
        errno = ENOMEM;
        return 0;
    }
    if (align <= SIZE_ALIGN)
        return malloc(len);
    if (!(mem = malloc(len + align-1)))
        return 0;
    new = (void *)((uintptr_t)mem + align-1 & -align);
    if (new == mem) return mem;

    struct chunk *c = MEM_TO_CHUNK(mem);
    struct chunk *n = MEM_TO_CHUNK(new);

    if (IS_MMAPPED(c)) {
        /* Apply difference between aligned and original
         * address to the "extra" field of mmapped chunk. */
        n->psize = c->psize + (new-mem);
        n->csize = c->csize - (new-mem);
#ifdef MALLOC_RED_ZONE
        n->usize = len;
        n->state = M_STATE_MMAP | M_STATE_USED;
        chunk_checksum_set(n);
#endif
        return new;
    }

    struct chunk *t = NEXT_CHUNK(c);

    /* Split the allocated chunk into two chunks. The aligned part
     * that will be used has the size in its footer reduced by the
     * difference between the aligned and original addresses, and
     * the resulting size copied to its header. A new header and
     * footer are written for the split-off part to be freed. */
    n->psize = c->csize = C_INUSE | (new-mem);
    n->csize = t->psize -= new-mem;

#ifdef MALLOC_RED_ZONE
    /* Update extra overhead */
    c->usize = OVERHEAD;
    c->state = M_STATE_BRK;
    chunk_checksum_set(c);
    n->usize = len;
    n->state = M_STATE_BRK | M_STATE_USED;
    chunk_checksum_set(n);
#endif
    __bin_chunk(c);
    return new;
}

weak_alias(__memalign, memalign);