[dpdk-dev] [PATCH v3 60/68] eal: enable memory hotplug support in rte_malloc

Anatoly Burakov anatoly.burakov at intel.com
Wed Apr 4 01:22:12 CEST 2018


This set of changes enables rte_malloc to allocate and free memory
as needed. Currently, this is disabled because legacy memory mode is
enabled unconditionally.

The way it works is as follows: malloc first checks whether there is
enough memory already allocated to satisfy the user's request. If
there isn't, we try to allocate more memory. The reverse happens on
free: we free an element, check its size (after merging adjacent free
elements), and see whether it is bigger than the hugepage size and
whether its start and end span a hugepage or more. If so, we remove
that area from the malloc heap (adjusting element lengths where
appropriate) and deallocate the pages.
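
To illustrate the externally visible behaviour described above, here
is a minimal usage sketch (not part of this patch). It assumes an
application running EAL in non-legacy mode; the allocation size and
the "example" tag are arbitrary.

#include <stdio.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_malloc.h>

int
main(int argc, char **argv)
{
	void *buf;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* If the heap on this socket cannot satisfy the request from
	 * already-allocated memory, the allocator will try to allocate
	 * more hugepages at runtime before failing.
	 */
	buf = rte_malloc_socket("example", 64 * 1024 * 1024, 0,
			rte_socket_id());
	if (buf == NULL) {
		printf("allocation failed\n");
		return -1;
	}

	/* On free, adjacent free elements are merged; if the resulting
	 * element spans one or more full hugepages, those pages are
	 * returned to the system.
	 */
	rte_free(buf);

	return 0;
}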

For legacy mode, runtime alloc/free of pages is disabled.

It is worth noting that memseg lists are sorted by page size, and
that we try our best to satisfy the user's request. That is, if the
user requests an element from 2MB page memory, we first check whether
we can satisfy that request from existing memory; if not, we try to
allocate more 2MB pages. If that fails and the user also specified
the "size is hint" flag, we then check other page sizes and try to
allocate from those. If that fails too, then, depending on the flags,
we may try allocating from other sockets. In other words, we try our
best to give the user what they asked for, but going to other sockets
is a last resort - first we try to allocate more memory on the same
socket.
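
As a hedged example of that fallback order (again, not part of the
patch; the memzone name and length are arbitrary), a reservation that
prefers 2MB pages but allows fallback could look like this:

#include <stdio.h>

#include <rte_eal.h>
#include <rte_memzone.h>

int
main(int argc, char **argv)
{
	const struct rte_memzone *mz;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Ask for memory backed by 2MB pages. The allocator first tries
	 * existing 2MB-page memory on the local socket, then tries to
	 * allocate more 2MB pages there. Because SIZE_HINT_ONLY is set,
	 * it may then fall back to other page sizes, and with
	 * SOCKET_ID_ANY it tries other sockets only as a last resort.
	 */
	mz = rte_memzone_reserve("example_mz", 16 * 1024 * 1024,
			SOCKET_ID_ANY,
			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("memzone reservation failed\n");
		return -1;
	}

	printf("reserved %zu bytes at %p\n", mz->len, mz->addr);

	return 0;
}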

Signed-off-by: Anatoly Burakov <anatoly.burakov at intel.com>
---

Notes:
    v3:
    - Compile fixes

 lib/librte_eal/common/eal_common_memzone.c |  26 +--
 lib/librte_eal/common/malloc_elem.c        |  86 +++++++
 lib/librte_eal/common/malloc_elem.h        |   3 +
 lib/librte_eal/common/malloc_heap.c        | 347 ++++++++++++++++++++++++++++-
 lib/librte_eal/common/malloc_heap.h        |   4 +-
 lib/librte_eal/common/rte_malloc.c         |  31 +--
 6 files changed, 433 insertions(+), 64 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index aed9331..d522883 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -94,7 +94,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
 	struct rte_mem_config *mcfg;
 	struct rte_fbarray *arr;
 	size_t requested_len;
-	int socket, i, mz_idx;
+	int mz_idx;
 
 	/* get pointer to global configuration */
 	mcfg = rte_eal_get_configuration()->mem_config;
@@ -179,29 +179,9 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
 		}
 	}
 
-	if (socket_id == SOCKET_ID_ANY)
-		socket = malloc_get_numa_socket();
-	else
-		socket = socket_id;
-
 	/* allocate memory on heap */
-	void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
-			requested_len, flags, align, bound, contig);
-
-	if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
-		/* try other heaps */
-		for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
-			if (socket == i)
-				continue;
-
-			mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
-					NULL, requested_len, flags, align,
-					bound, contig);
-			if (mz_addr != NULL)
-				break;
-		}
-	}
-
+	void *mz_addr = malloc_heap_alloc(NULL, requested_len, socket_id, flags,
+			align, bound, contig);
 	if (mz_addr == NULL) {
 		rte_errno = ENOMEM;
 		return NULL;
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 9db416f..4346532 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -447,6 +447,92 @@ malloc_elem_free(struct malloc_elem *elem)
 	return elem;
 }
 
+/* assume all checks were already done */
+void
+malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
+{
+	struct malloc_elem *hide_start, *hide_end, *prev, *next;
+	size_t len_before, len_after;
+
+	hide_start = start;
+	hide_end = RTE_PTR_ADD(start, len);
+
+	prev = elem->prev;
+	next = elem->next;
+
+	len_before = RTE_PTR_DIFF(hide_start, elem);
+	len_after = RTE_PTR_DIFF(next, hide_end);
+
+	if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+		/* split after */
+		split_elem(elem, hide_end);
+
+		malloc_elem_free_list_insert(hide_end);
+	} else if (len_after >= MALLOC_ELEM_HEADER_LEN) {
+		/* shrink current element */
+		elem->size -= len_after;
+		memset(hide_end, 0, sizeof(*hide_end));
+
+		/* copy next element's data to our pad */
+		memcpy(hide_end, next, sizeof(*hide_end));
+
+		/* pad next element */
+		next->state = ELEM_PAD;
+		next->pad = len_after;
+
+		/* next element is busy, would've been merged otherwise */
+		hide_end->pad = len_after;
+		hide_end->size += len_after;
+
+		/* adjust pointers to point to our new pad */
+		if (next->next)
+			next->next->prev = hide_end;
+		elem->next = hide_end;
+	} else if (len_after > 0) {
+		RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
+		rte_panic("blow up\n");
+		return;
+	}
+
+	if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+		/* split before */
+		split_elem(elem, hide_start);
+
+		prev = elem;
+		elem = hide_start;
+
+		malloc_elem_free_list_insert(prev);
+	} else if (len_before > 0) {
+		/*
+		 * unlike with elements after current, here we don't need to
+		 * pad elements, but rather just increase the size of previous
+	 * element, copy the old header and set up the trailer.
+		 */
+		void *trailer = RTE_PTR_ADD(prev,
+				prev->size - MALLOC_ELEM_TRAILER_LEN);
+
+		memcpy(hide_start, elem, sizeof(*elem));
+		hide_start->size = len;
+
+		prev->size += len_before;
+		set_trailer(prev);
+
+		/* update pointers */
+		prev->next = hide_start;
+		if (next)
+			next->prev = hide_start;
+
+		elem = hide_start;
+
+		/* erase old trailer */
+		memset(trailer, 0, MALLOC_ELEM_TRAILER_LEN);
+		/* erase old header */
+		memset(elem, 0, sizeof(*elem));
+	}
+
+	remove_elem(elem);
+}
+
 /*
  * attempt to resize a malloc_elem by expanding into any free space
  * immediately after it in memory.
diff --git a/lib/librte_eal/common/malloc_elem.h b/lib/librte_eal/common/malloc_elem.h
index 620dd44..8f4aef8 100644
--- a/lib/librte_eal/common/malloc_elem.h
+++ b/lib/librte_eal/common/malloc_elem.h
@@ -154,6 +154,9 @@ int
 malloc_elem_resize(struct malloc_elem *elem, size_t size);
 
 void
+malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len);
+
+void
 malloc_elem_free_list_remove(struct malloc_elem *elem);
 
 /*
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index d798675..5f8c643 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -20,8 +20,10 @@
 #include <rte_spinlock.h>
 #include <rte_memcpy.h>
 #include <rte_atomic.h>
+#include <rte_fbarray.h>
 
 #include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
 #include "malloc_elem.h"
 #include "malloc_heap.h"
 
@@ -149,48 +151,371 @@ find_suitable_element(struct malloc_heap *heap, size_t size,
  * scan fails. Once the new memseg is added, it re-scans and should return
  * the new element after releasing the lock.
  */
-void *
-malloc_heap_alloc(struct malloc_heap *heap,
-		const char *type __attribute__((unused)), size_t size, unsigned flags,
-		size_t align, size_t bound, bool contig)
+static void *
+heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
+		unsigned int flags, size_t align, size_t bound, bool contig)
 {
 	struct malloc_elem *elem;
 
 	size = RTE_CACHE_LINE_ROUNDUP(size);
 	align = RTE_CACHE_LINE_ROUNDUP(align);
 
-	rte_spinlock_lock(&heap->lock);
-
 	elem = find_suitable_element(heap, size, flags, align, bound, contig);
 	if (elem != NULL) {
 		elem = malloc_elem_alloc(elem, size, align, bound, contig);
+
 		/* increase heap's count of allocated elements */
 		heap->alloc_count++;
 	}
-	rte_spinlock_unlock(&heap->lock);
 
 	return elem == NULL ? NULL : (void *)(&elem[1]);
 }
 
+static int
+try_expand_heap(struct malloc_heap *heap, size_t pg_sz, size_t elt_size,
+		int socket, unsigned int flags, size_t align, size_t bound,
+		bool contig)
+{
+	size_t map_len;
+	struct rte_memseg_list *msl;
+	struct rte_memseg **ms;
+	struct malloc_elem *elem;
+	int n_segs, allocd_pages;
+	void *ret, *map_addr;
+
+	align = RTE_MAX(align, MALLOC_ELEM_HEADER_LEN);
+	map_len = RTE_ALIGN_CEIL(align + elt_size + MALLOC_ELEM_TRAILER_LEN,
+			pg_sz);
+
+	n_segs = map_len / pg_sz;
+
+	/* we can't know in advance how many pages we'll need, so malloc */
+	ms = malloc(sizeof(*ms) * n_segs);
+
+	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
+			socket, true);
+
+	/* make sure we've allocated our pages... */
+	if (allocd_pages < 0)
+		goto free_ms;
+
+	map_addr = ms[0]->addr;
+	msl = rte_mem_virt2memseg_list(map_addr);
+
+	/* check if we wanted contiguous memory but didn't get it */
+	if (contig && !eal_memalloc_is_contig(msl, map_addr, map_len)) {
+		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
+				__func__);
+		goto free_pages;
+	}
+
+	/* add newly minted memsegs to malloc heap */
+	elem = malloc_heap_add_memory(heap, msl, map_addr, map_len);
+
+	/* try once more, as now we have allocated new memory */
+	ret = find_suitable_element(heap, elt_size, flags, align, bound,
+			contig);
+
+	if (ret == NULL)
+		goto free_elem;
+
+	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
+		socket, map_len >> 20ULL);
+
+	free(ms);
+
+	return 0;
+
+free_elem:
+	malloc_elem_free_list_remove(elem);
+	malloc_elem_hide_region(elem, map_addr, map_len);
+	heap->total_size -= map_len;
+
+free_pages:
+	eal_memalloc_free_seg_bulk(ms, n_segs);
+free_ms:
+	free(ms);
+
+	return -1;
+}
+
+static int
+compare_pagesz(const void *a, const void *b)
+{
+	const struct rte_memseg_list * const*mpa = a;
+	const struct rte_memseg_list * const*mpb = b;
+	const struct rte_memseg_list *msla = *mpa;
+	const struct rte_memseg_list *mslb = *mpb;
+	uint64_t pg_sz_a = msla->page_sz;
+	uint64_t pg_sz_b = mslb->page_sz;
+
+	if (pg_sz_a < pg_sz_b)
+		return -1;
+	if (pg_sz_a > pg_sz_b)
+		return 1;
+	return 0;
+}
+
+static int
+alloc_mem_on_socket(size_t size, int socket, unsigned int flags, size_t align,
+		size_t bound, bool contig)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
+	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
+	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
+	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
+	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
+	uint64_t prev_pg_sz;
+	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
+	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
+	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
+	void *ret;
+
+	memset(requested_msls, 0, sizeof(requested_msls));
+	memset(other_msls, 0, sizeof(other_msls));
+	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
+	memset(other_pg_sz, 0, sizeof(other_pg_sz));
+
+	/*
+	 * go through memseg list and take note of all the page sizes available,
+	 * and if any of them were specifically requested by the user.
+	 */
+	n_requested_msls = 0;
+	n_other_msls = 0;
+	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+		struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+		if (msl->socket_id != socket)
+			continue;
+
+		if (msl->base_va == NULL)
+			continue;
+
+		/* if pages of specific size were requested */
+		if (size_flags != 0 && check_hugepage_sz(size_flags,
+				msl->page_sz))
+			requested_msls[n_requested_msls++] = msl;
+		else if (size_flags == 0 || size_hint)
+			other_msls[n_other_msls++] = msl;
+	}
+
+	/* sort the lists, smallest first */
+	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
+			compare_pagesz);
+	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
+			compare_pagesz);
+
+	/* now, extract page sizes we are supposed to try */
+	prev_pg_sz = 0;
+	n_requested_pg_sz = 0;
+	for (i = 0; i < n_requested_msls; i++) {
+		uint64_t pg_sz = requested_msls[i]->page_sz;
+
+		if (prev_pg_sz != pg_sz) {
+			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
+			prev_pg_sz = pg_sz;
+		}
+	}
+	prev_pg_sz = 0;
+	n_other_pg_sz = 0;
+	for (i = 0; i < n_other_msls; i++) {
+		uint64_t pg_sz = other_msls[i]->page_sz;
+
+		if (prev_pg_sz != pg_sz) {
+			other_pg_sz[n_other_pg_sz++] = pg_sz;
+			prev_pg_sz = pg_sz;
+		}
+	}
+
+	/* finally, try allocating memory of specified page sizes, starting from
+	 * the smallest sizes
+	 */
+	for (i = 0; i < n_requested_pg_sz; i++) {
+		uint64_t pg_sz = requested_pg_sz[i];
+
+		/*
+		 * do not pass the size hint here, as user expects other page
+		 * sizes first, before resorting to best effort allocation.
+		 */
+		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
+				align, bound, contig))
+			return 0;
+	}
+	if (n_other_pg_sz == 0)
+		return -1;
+
+	/* now, check if we can reserve anything with size hint */
+	ret = find_suitable_element(heap, size, flags, align, bound, contig);
+	if (ret != NULL)
+		return 0;
+
+	/*
+	 * we still couldn't reserve memory, so try expanding heap with other
+	 * page sizes, if there are any
+	 */
+	for (i = 0; i < n_other_pg_sz; i++) {
+		uint64_t pg_sz = other_pg_sz[i];
+
+		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
+				align, bound, contig))
+			return 0;
+	}
+	return -1;
+}
+
+/* this will try lower page sizes first */
+static void *
+heap_alloc_on_socket(const char *type, size_t size, int socket,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
+	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
+	void *ret;
+
+	rte_spinlock_lock(&(heap->lock));
+
+	align = align == 0 ? 1 : align;
+
+	/* for legacy mode, try once and with all flags */
+	if (internal_config.legacy_mem) {
+		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
+		goto alloc_unlock;
+	}
+
+	/*
+	 * we do not pass the size hint here, because even if allocation fails,
+	 * we may still be able to allocate memory from appropriate page sizes,
+	 * we just need to request more memory first.
+	 */
+	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
+	if (ret != NULL)
+		goto alloc_unlock;
+
+	if (!alloc_mem_on_socket(size, socket, flags, align, bound, contig)) {
+		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
+
+		/* this should have succeeded */
+		if (ret == NULL)
+			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
+	}
+alloc_unlock:
+	rte_spinlock_unlock(&(heap->lock));
+	return ret;
+}
+
+void *
+malloc_heap_alloc(const char *type, size_t size, int socket_arg,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	int socket, i, cur_socket;
+	void *ret;
+
+	/* return NULL if size is 0 or alignment is not power-of-2 */
+	if (size == 0 || (align && !rte_is_power_of_2(align)))
+		return NULL;
+
+	if (!rte_eal_has_hugepages())
+		socket_arg = SOCKET_ID_ANY;
+
+	if (socket_arg == SOCKET_ID_ANY)
+		socket = malloc_get_numa_socket();
+	else
+		socket = socket_arg;
+
+	/* Check socket parameter */
+	if (socket >= RTE_MAX_NUMA_NODES)
+		return NULL;
+
+	ret = heap_alloc_on_socket(type, size, socket, flags, align, bound,
+			contig);
+	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
+		return ret;
+
+	/* try other heaps */
+	for (i = 0; i < (int) rte_socket_count(); i++) {
+		cur_socket = rte_socket_id_by_idx(i);
+		if (cur_socket == socket)
+			continue;
+		ret = heap_alloc_on_socket(type, size, cur_socket, flags,
+				align, bound, contig);
+		if (ret != NULL)
+			return ret;
+	}
+	return NULL;
+}
+
 int
 malloc_heap_free(struct malloc_elem *elem)
 {
 	struct malloc_heap *heap;
-	struct malloc_elem *ret;
+	void *start, *aligned_start, *end, *aligned_end;
+	size_t len, aligned_len, page_sz;
+	struct rte_memseg_list *msl;
+	int n_segs, seg_idx, max_seg_idx, ret;
 
 	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
 		return -1;
 
 	/* elem may be merged with previous element, so keep heap address */
 	heap = elem->heap;
+	msl = elem->msl;
+	page_sz = (size_t)msl->page_sz;
 
 	rte_spinlock_lock(&(heap->lock));
 
-	ret = malloc_elem_free(elem);
+	/* mark element as free */
+	elem->state = ELEM_FREE;
 
-	rte_spinlock_unlock(&(heap->lock));
+	elem = malloc_elem_free(elem);
+
+	/* anything after this is a bonus */
+	ret = 0;
+
+	/* ...of which we can't avail if we are in legacy mode */
+	if (internal_config.legacy_mem)
+		goto free_unlock;
+
+	/* check if we can free any memory back to the system */
+	if (elem->size < page_sz)
+		goto free_unlock;
 
-	return ret != NULL ? 0 : -1;
+	/* probably, but let's make sure, as we may not be using up full page */
+	start = elem;
+	len = elem->size;
+	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
+	end = RTE_PTR_ADD(elem, len);
+	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);
+
+	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
+
+	/* can't free anything */
+	if (aligned_len < page_sz)
+		goto free_unlock;
+
+	malloc_elem_free_list_remove(elem);
+
+	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);
+
+	/* we don't really care if we fail to deallocate memory */
+	n_segs = aligned_len / page_sz;
+	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
+	max_seg_idx = seg_idx + n_segs;
+
+	for (; seg_idx < max_seg_idx; seg_idx++) {
+		struct rte_memseg *ms;
+
+		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
+		eal_memalloc_free_seg(ms);
+	}
+	heap->total_size -= aligned_len;
+
+	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
+		msl->socket_id, aligned_len >> 20ULL);
+free_unlock:
+	rte_spinlock_unlock(&(heap->lock));
+	return ret;
 }
 
 int
diff --git a/lib/librte_eal/common/malloc_heap.h b/lib/librte_eal/common/malloc_heap.h
index c57b59a..03b8014 100644
--- a/lib/librte_eal/common/malloc_heap.h
+++ b/lib/librte_eal/common/malloc_heap.h
@@ -26,8 +26,8 @@ malloc_get_numa_socket(void)
 }
 
 void *
-malloc_heap_alloc(struct malloc_heap *heap,	const char *type, size_t size,
-		unsigned int flags, size_t align, size_t bound, bool contig);
+malloc_heap_alloc(const char *type, size_t size, int socket, unsigned int flags,
+		size_t align, size_t bound, bool contig);
 
 int
 malloc_heap_free(struct malloc_elem *elem);
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index c6d3e57..b51a6d1 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -40,10 +40,6 @@ void *
 rte_malloc_socket(const char *type, size_t size, unsigned int align,
 		int socket_arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-	int socket, i;
-	void *ret;
-
 	/* return NULL if size is 0 or alignment is not power-of-2 */
 	if (size == 0 || (align && !rte_is_power_of_2(align)))
 		return NULL;
@@ -51,33 +47,12 @@ rte_malloc_socket(const char *type, size_t size, unsigned int align,
 	if (!rte_eal_has_hugepages())
 		socket_arg = SOCKET_ID_ANY;
 
-	if (socket_arg == SOCKET_ID_ANY)
-		socket = malloc_get_numa_socket();
-	else
-		socket = socket_arg;
-
 	/* Check socket parameter */
-	if (socket >= RTE_MAX_NUMA_NODES)
+	if (socket_arg >= RTE_MAX_NUMA_NODES)
 		return NULL;
 
-	ret = malloc_heap_alloc(&mcfg->malloc_heaps[socket], type,
-				size, 0, align == 0 ? 1 : align, 0, false);
-	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
-		return ret;
-
-	/* try other heaps */
-	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
-		/* we already tried this one */
-		if (i == socket)
-			continue;
-
-		ret = malloc_heap_alloc(&mcfg->malloc_heaps[i], type,
-				size, 0, align == 0 ? 1 : align, 0, false);
-		if (ret != NULL)
-			return ret;
-	}
-
-	return NULL;
+	return malloc_heap_alloc(type, size, socket_arg, 0,
+			align == 0 ? 1 : align, 0, false);
 }
 
 /*
-- 
2.7.4

