Diffstat (limited to 'drivers/infiniband/hw/qib/qib_user_pages.c')
-rw-r--r-- | drivers/infiniband/hw/qib/qib_user_pages.c | 137
1 file changed, 137 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 000000000..f4b5f0505
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/device.h>
+
+#include "qib.h"
+
+static void __qib_release_user_pages(struct page **p, size_t num_pages,
+				     int dirty)
+{
+	unpin_user_pages_dirty_lock(p, num_pages, dirty);
+}
+
+/*
+ * qib_map_page - a safety wrapper around dma_map_page()
+ *
+ * A dma_addr of all 0's is interpreted by the chip as "disabled".
+ * Unfortunately, it can also be a valid dma_addr returned on some
+ * architectures.
+ *
+ * The powerpc iommu assigns dma_addrs in ascending order, so we don't
+ * have to bother with retries or mapping a dummy page to ensure we
+ * don't just get the same mapping again.
+ *
+ * I'm sure we won't be so lucky with other iommu's, so FIXME.
+ */
+int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
+{
+	dma_addr_t phys;
+
+	phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&hwdev->dev, phys))
+		return -ENOMEM;
+
+	if (!phys) {
+		dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
+		phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
+				    DMA_FROM_DEVICE);
+		if (dma_mapping_error(&hwdev->dev, phys))
+			return -ENOMEM;
+		/*
+		 * FIXME: If we get 0 again, we should keep this page,
+		 * map another, then free the 0 page.
+		 */
+	}
+	*daddr = phys;
+	return 0;
+}
+
+/**
+ * qib_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages.  For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int qib_get_user_pages(unsigned long start_page, size_t num_pages,
+		       struct page **p)
+{
+	unsigned long locked, lock_limit;
+	size_t got;
+	int ret;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
+
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	mmap_read_lock(current->mm);
+	for (got = 0; got < num_pages; got += ret) {
+		ret = pin_user_pages(start_page + got * PAGE_SIZE,
+				     num_pages - got,
+				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
+				     p + got, NULL);
+		if (ret < 0) {
+			mmap_read_unlock(current->mm);
+			goto bail_release;
+		}
+	}
+	mmap_read_unlock(current->mm);
+
+	return 0;
+bail_release:
+	__qib_release_user_pages(p, got, 0);
+bail:
+	atomic64_sub(num_pages, &current->mm->pinned_vm);
+	return ret;
+}
+
+void qib_release_user_pages(struct page **p, size_t num_pages)
+{
+	__qib_release_user_pages(p, num_pages, 1);
+
+	/* during close after signal, mm can be NULL */
+	if (current->mm)
+		atomic64_sub(num_pages, &current->mm->pinned_vm);
+}
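For context, here is a minimal caller sketch showing how the three helpers in this file might be used together: pin one page of a user buffer, map it for DMA, hand the address to the hardware, then undo both steps. It is not part of this patch; the function name example_pin_and_map() and the pdev/uaddr parameters are hypothetical and assumed to be supplied by the rest of the driver.

/*
 * Illustrative sketch only -- not part of the qib driver.  'pdev' and
 * 'uaddr' are assumed to come from the caller.
 */
static int example_pin_and_map(struct pci_dev *pdev, unsigned long uaddr)
{
	struct page *pages[1];
	dma_addr_t daddr;
	int ret;

	/* Pin a single page of the (page-aligned) user buffer. */
	ret = qib_get_user_pages(uaddr & PAGE_MASK, 1, pages);
	if (ret)
		return ret;

	/* Map it for DMA; qib_map_page() retries if it gets a 0 address. */
	ret = qib_map_page(pdev, pages[0], &daddr);
	if (ret)
		goto release;

	/* ... program 'daddr' into the receive hardware here ... */

	dma_unmap_page(&pdev->dev, daddr, PAGE_SIZE, DMA_FROM_DEVICE);
release:
	qib_release_user_pages(pages, 1);
	return ret;
}

Note that qib_release_user_pages() also drops the pinned_vm accounting taken in qib_get_user_pages(), so a caller only has to balance the pin/release and map/unmap pairs.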