Diffstat (limited to 'mm/cma.c')
-rw-r--r--   mm/cma.c   32
1 file changed, 9 insertions(+), 23 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 7c09c47e53..3e9724716b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -14,11 +14,6 @@
#define pr_fmt(fmt) "cma: " fmt
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-# define DEBUG
-#endif
-#endif
#define CREATE_TRACE_POINTS
#include <linux/memblock.h>
@@ -187,10 +182,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
if (!size || !memblock_is_region_reserved(base, size))
return -EINVAL;
- /* alignment should be aligned with order_per_bit */
- if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
- return -EINVAL;
-
/* ensure minimal alignment required by mm core */
if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
return -EINVAL;
@@ -387,7 +378,6 @@ err:
return ret;
}
-#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
unsigned long next_zero_bit, next_set_bit, nr_zero;
@@ -412,9 +402,6 @@ static void cma_debug_show_areas(struct cma *cma)
pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
spin_unlock_irq(&cma->lock);
}
-#else
-static inline void cma_debug_show_areas(struct cma *cma) { }
-#endif
/**
* cma_alloc() - allocate pages from contiguous area
@@ -436,17 +423,18 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
unsigned long i;
struct page *page = NULL;
int ret = -ENOMEM;
+ const char *name = cma ? cma->name : NULL;
+
+ trace_cma_alloc_start(name, count, align);
if (!cma || !cma->count || !cma->bitmap)
- goto out;
+ return page;
pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
(void *)cma, cma->name, count, align);
if (!count)
- goto out;
-
- trace_cma_alloc_start(cma->name, count, align);
+ return page;
mask = cma_bitmap_aligned_mask(cma, align);
offset = cma_bitmap_aligned_offset(cma, align);
@@ -454,7 +442,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
if (bitmap_count > bitmap_maxno)
- goto out;
+ return page;
for (;;) {
spin_lock_irq(&cma->lock);
@@ -496,8 +484,6 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
start = bitmap_no + mask + 1;
}
- trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);
-
/*
* CMA can allocate multiple page blocks, which results in different
* blocks being marked with different tags. Reset the tags to ignore
@@ -515,14 +501,13 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
}
pr_debug("%s(): returned %p\n", __func__, page);
-out:
+ trace_cma_alloc_finish(name, pfn, page, count, align, ret);
if (page) {
count_vm_event(CMA_ALLOC_SUCCESS);
cma_sysfs_account_success_pages(cma, count);
} else {
count_vm_event(CMA_ALLOC_FAIL);
- if (cma)
- cma_sysfs_account_fail_pages(cma, count);
+ cma_sysfs_account_fail_pages(cma, count);
}
return page;
@@ -573,6 +558,7 @@ bool cma_release(struct cma *cma, const struct page *pages,
free_contig_range(pfn, count);
cma_clear_bitmap(cma, pfn, count);
+ cma_sysfs_account_release_pages(cma, count);
trace_cma_release(cma->name, pfn, pages, count);
return true;
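
For reference, a minimal caller-side sketch (not part of this commit) of the cma_alloc()/cma_release() pair touched above. The area pointer my_cma and the helper names are hypothetical, and error handling is kept to the bare minimum:

#include <linux/cma.h>
#include <linux/printk.h>

/*
 * Hypothetical CMA area, assumed to have been registered earlier,
 * e.g. via cma_declare_contiguous() or cma_init_reserved_mem().
 */
static struct cma *my_cma;

static struct page *my_driver_get_buffer(unsigned long nr_pages)
{
	/*
	 * align is an order (0 == page aligned); cma_alloc() returns NULL
	 * when the area is missing or the request cannot be satisfied, so
	 * no separate NULL check of my_cma is needed here.
	 */
	return cma_alloc(my_cma, nr_pages, 0, false);
}

static void my_driver_put_buffer(struct page *page, unsigned long nr_pages)
{
	/* cma_release() returns false if the pages do not belong to my_cma */
	if (!cma_release(my_cma, page, nr_pages))
		pr_warn("pages were not allocated from the CMA area\n");
}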