tools/testing/nvdimm/pmem-dax.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>

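/**
 * __pmem_direct_access() - translate a dax page offset to a kaddr/pfn pair
 * @pmem: pmem device backing the dax_device
 * @pgoff: page offset into the device's dax range
 * @nr_pages: number of consecutive pages the caller wants to access
 * @mode: DAX_ACCESS or DAX_RECOVERY_WRITE (not consulted in this test stub)
 * @kaddr: optional output, kernel virtual address corresponding to @pgoff
 * @pfn: optional output, page frame number corresponding to @pgoff
 *
 * nfit_test override of the pmem driver's direct_access helper. Returns
 * the number of contiguous pages that may be accessed via @kaddr/@pfn,
 * or -EIO if the requested range overlaps known poison.
 */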
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

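	/*
	 * is_bad_pmem() takes a 512-byte sector offset and a byte length;
	 * PFN_PHYS() converts the page offset / page count to bytes first.
	 */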
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit dax to a single page at a time, since the backing memory
	 * is vmalloc()-backed in the nfit_test case and therefore not
	 * physically contiguous.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

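		/*
		 * vmalloc() memory is only virtually contiguous, so the
		 * pfn resolved above is valid for this one page only.
		 */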
		return 1;
	}

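	/*
	 * Outside the nfit_test ranges the backing memory is physically
	 * contiguous, so kaddr/pfn follow linearly from the offset.
	 */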
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit the known-good range to the
	 * requested range, which was verified clean above; otherwise
	 * advertise everything up to the end of the device.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
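
/*
 * Example caller (an illustrative sketch, not built here): the real driver,
 * drivers/nvdimm/pmem.c, wires this helper into its dax_operations roughly
 * as follows, with dax_get_private() recovering the pmem_device that was
 * registered with the dax_device:
 *
 *	static long pmem_dax_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
 *			void **kaddr, pfn_t *pfn)
 *	{
 *		struct pmem_device *pmem = dax_get_private(dax_dev);
 *
 *		return __pmem_direct_access(pmem, pgoff, nr_pages, mode,
 *				kaddr, pfn);
 *	}
 */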