author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /drivers/char
parent     Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig                         |    4
-rw-r--r--  drivers/char/Makefile                        |    1
-rw-r--r--  drivers/char/agp/Kconfig                     |   16
-rw-r--r--  drivers/char/agp/Makefile                    |    2
-rw-r--r--  drivers/char/agp/hp-agp.c                    |  550
-rw-r--r--  drivers/char/agp/i460-agp.c                  |  659
-rw-r--r--  drivers/char/hpet.c                          |   33
-rw-r--r--  drivers/char/hw_random/Kconfig               |    2
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c         |    2
-rw-r--r--  drivers/char/hw_random/hisi-rng.c            |    2
-rw-r--r--  drivers/char/hw_random/imx-rngc.c            |   10
-rw-r--r--  drivers/char/hw_random/jh7110-trng.c         |    2
-rw-r--r--  drivers/char/hw_random/ks-sa-rng.c           |   26
-rw-r--r--  drivers/char/hw_random/meson-rng.c           |   80
-rw-r--r--  drivers/char/hw_random/mpfs-rng.c            |    2
-rw-r--r--  drivers/char/hw_random/n2-drv.c              |   10
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c         |    1
-rw-r--r--  drivers/char/hw_random/octeon-rng.c          |    6
-rw-r--r--  drivers/char/hw_random/st-rng.c              |    1
-rw-r--r--  drivers/char/hw_random/stm32-rng.c           |  512
-rw-r--r--  drivers/char/hw_random/xgene-rng.c           |    1
-rw-r--r--  drivers/char/hw_random/xiphera-trng.c        |    2
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c          |   11
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c            |    1
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c                |    2
-rw-r--r--  drivers/char/mem.c                           |   16
-rw-r--r--  drivers/char/mspec.c                         |  295
-rw-r--r--  drivers/char/random.c                        |    1
-rw-r--r--  drivers/char/sonypi.c                        |    2
-rw-r--r--  drivers/char/virtio_console.c                |    2
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c   |  138
31 files changed, 562 insertions, 1830 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 625af75833..7c8dd0abcf 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config DEVPORT
device is similar to /dev/mem, but for I/O ports.
config HPET
- bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ bool "HPET - High Precision Event Timer" if X86
default n
depends on ACPI
help
@@ -377,7 +377,7 @@ config HPET_MMAP_DEFAULT
config HANGCHECK_TIMER
tristate "Hangcheck timer"
- depends on X86 || IA64 || PPC64 || S390
+ depends on X86 || PPC64 || S390
help
The hangcheck-timer module detects when the system has gone
out to lunch past a certain margin. It can reboot the system
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index c5f532e412..e9b360cdc9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
-obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_IBM_BSR) += bsr.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 4f501e4842..c47eb7bf06 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig AGP
tristate "/dev/agpgart (AGP Support)"
- depends on ALPHA || IA64 || PARISC || PPC || X86
+ depends on ALPHA || PARISC || PPC || X86
depends on PCI
help
AGP (Accelerated Graphics Port) is a bus system mainly used to
@@ -109,20 +109,6 @@ config AGP_VIA
This option gives you AGP support for the GLX component of
X on VIA MVP3/Apollo Pro chipsets.
-config AGP_I460
- tristate "Intel 460GX chipset support"
- depends on AGP && IA64
- help
- This option gives you AGP GART support for the Intel 460GX chipset
- for IA64 processors.
-
-config AGP_HP_ZX1
- tristate "HP ZX1 chipset AGP support"
- depends on AGP && IA64
- help
- This option gives you AGP GART support for the HP ZX1 chipset
- for IA64 processors.
-
config AGP_PARISC
tristate "HP Quicksilver AGP support"
depends on AGP && PARISC && 64BIT && IOMMU_SBA
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 90ed8c789e..25834557e4 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -14,9 +14,7 @@ obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
-obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
-obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
deleted file mode 100644
index 84d9adbb62..0000000000
--- a/drivers/char/agp/hp-agp.c
+++ /dev/null
@@ -1,550 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HP zx1 AGPGART routines.
- *
- * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include <linux/log2.h>
-#include <linux/slab.h>
-
-#include <asm/acpi-ext.h>
-
-#include "agp.h"
-
-#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
-
-/* HP ZX1 IOC registers */
-#define HP_ZX1_IBASE 0x300
-#define HP_ZX1_IMASK 0x308
-#define HP_ZX1_PCOM 0x310
-#define HP_ZX1_TCNFG 0x318
-#define HP_ZX1_PDIR_BASE 0x320
-
-#define HP_ZX1_IOVA_BASE GB(1UL)
-#define HP_ZX1_IOVA_SIZE GB(1UL)
-#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
-#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
-
-#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
-#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
-
-#define AGP8X_MODE_BIT 3
-#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
-
-/* AGP bridge need not be PCI device, but DRM thinks it is. */
-static struct pci_dev fake_bridge_dev;
-
-static int hp_zx1_gart_found;
-
-static struct aper_size_info_fixed hp_zx1_sizes[] =
-{
- {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
-};
-
-static struct gatt_mask hp_zx1_masks[] =
-{
- {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
-};
-
-static struct _hp_private {
- volatile u8 __iomem *ioc_regs;
- volatile u8 __iomem *lba_regs;
- int lba_cap_offset;
- u64 *io_pdir; // PDIR for entire IOVA
- u64 *gatt; // PDIR just for GART (subset of above)
- u64 gatt_entries;
- u64 iova_base;
- u64 gart_base;
- u64 gart_size;
- u64 io_pdir_size;
- int io_pdir_owner; // do we own it, or share it with sba_iommu?
- int io_page_size;
- int io_tlb_shift;
- int io_tlb_ps; // IOC ps config
- int io_pages_per_kpage;
-} hp_private;
-
-static int __init hp_zx1_ioc_shared(void)
-{
- struct _hp_private *hp = &hp_private;
-
- printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
-
- /*
- * IOC already configured by sba_iommu module; just use
- * its setup. We assume:
- * - IOVA space is 1Gb in size
- * - first 512Mb is IOMMU, second 512Mb is GART
- */
- hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
- switch (hp->io_tlb_ps) {
- case 0: hp->io_tlb_shift = 12; break;
- case 1: hp->io_tlb_shift = 13; break;
- case 2: hp->io_tlb_shift = 14; break;
- case 3: hp->io_tlb_shift = 16; break;
- default:
- printk(KERN_ERR PFX "Invalid IOTLB page size "
- "configuration 0x%x\n", hp->io_tlb_ps);
- hp->gatt = NULL;
- hp->gatt_entries = 0;
- return -ENODEV;
- }
- hp->io_page_size = 1 << hp->io_tlb_shift;
- hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
-
- hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
- hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
-
- hp->gart_size = HP_ZX1_GART_SIZE;
- hp->gatt_entries = hp->gart_size / hp->io_page_size;
-
- hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
- hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
-
- if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
- /* Normal case when no AGP device in system */
- hp->gatt = NULL;
- hp->gatt_entries = 0;
- printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
- "GART disabled\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int __init
-hp_zx1_ioc_owner (void)
-{
- struct _hp_private *hp = &hp_private;
-
- printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
-
- /*
- * Select an IOV page size no larger than system page size.
- */
- if (PAGE_SIZE >= KB(64)) {
- hp->io_tlb_shift = 16;
- hp->io_tlb_ps = 3;
- } else if (PAGE_SIZE >= KB(16)) {
- hp->io_tlb_shift = 14;
- hp->io_tlb_ps = 2;
- } else if (PAGE_SIZE >= KB(8)) {
- hp->io_tlb_shift = 13;
- hp->io_tlb_ps = 1;
- } else {
- hp->io_tlb_shift = 12;
- hp->io_tlb_ps = 0;
- }
- hp->io_page_size = 1 << hp->io_tlb_shift;
- hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
-
- hp->iova_base = HP_ZX1_IOVA_BASE;
- hp->gart_size = HP_ZX1_GART_SIZE;
- hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
-
- hp->gatt_entries = hp->gart_size / hp->io_page_size;
- hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
-
- return 0;
-}
-
-static int __init
-hp_zx1_ioc_init (u64 hpa)
-{
- struct _hp_private *hp = &hp_private;
-
- hp->ioc_regs = ioremap(hpa, 1024);
- if (!hp->ioc_regs)
- return -ENOMEM;
-
- /*
- * If the IOTLB is currently disabled, we can take it over.
- * Otherwise, we have to share with sba_iommu.
- */
- hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
-
- if (hp->io_pdir_owner)
- return hp_zx1_ioc_owner();
-
- return hp_zx1_ioc_shared();
-}
-
-static int
-hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
-{
- u16 status;
- u8 pos, id;
- int ttl = 48;
-
- status = readw(hpa+PCI_STATUS);
- if (!(status & PCI_STATUS_CAP_LIST))
- return 0;
- pos = readb(hpa+PCI_CAPABILITY_LIST);
- while (ttl-- && pos >= 0x40) {
- pos &= ~3;
- id = readb(hpa+pos+PCI_CAP_LIST_ID);
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
- }
- return 0;
-}
-
-static int __init
-hp_zx1_lba_init (u64 hpa)
-{
- struct _hp_private *hp = &hp_private;
- int cap;
-
- hp->lba_regs = ioremap(hpa, 256);
- if (!hp->lba_regs)
- return -ENOMEM;
-
- hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
-
- cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
- if (cap != PCI_CAP_ID_AGP) {
- printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
- cap, hp->lba_cap_offset);
- iounmap(hp->lba_regs);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int
-hp_zx1_fetch_size(void)
-{
- int size;
-
- size = hp_private.gart_size / MB(1);
- hp_zx1_sizes[0].size = size;
- agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
- return size;
-}
-
-static int
-hp_zx1_configure (void)
-{
- struct _hp_private *hp = &hp_private;
-
- agp_bridge->gart_bus_addr = hp->gart_base;
- agp_bridge->capndx = hp->lba_cap_offset;
- agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
-
- if (hp->io_pdir_owner) {
- writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
- readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
- writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
- readl(hp->ioc_regs+HP_ZX1_TCNFG);
- writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
- readl(hp->ioc_regs+HP_ZX1_IMASK);
- writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
- readl(hp->ioc_regs+HP_ZX1_IBASE);
- writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
- readl(hp->ioc_regs+HP_ZX1_PCOM);
- }
-
- return 0;
-}
-
-static void
-hp_zx1_cleanup (void)
-{
- struct _hp_private *hp = &hp_private;
-
- if (hp->ioc_regs) {
- if (hp->io_pdir_owner) {
- writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
- readq(hp->ioc_regs+HP_ZX1_IBASE);
- }
- iounmap(hp->ioc_regs);
- }
- if (hp->lba_regs)
- iounmap(hp->lba_regs);
-}
-
-static void
-hp_zx1_tlbflush (struct agp_memory *mem)
-{
- struct _hp_private *hp = &hp_private;
-
- writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
- readq(hp->ioc_regs+HP_ZX1_PCOM);
-}
-
-static int
-hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
-{
- struct _hp_private *hp = &hp_private;
- int i;
-
- if (hp->io_pdir_owner) {
- hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
- get_order(hp->io_pdir_size));
- if (!hp->io_pdir) {
- printk(KERN_ERR PFX "Couldn't allocate contiguous "
- "memory for I/O PDIR\n");
- hp->gatt = NULL;
- hp->gatt_entries = 0;
- return -ENOMEM;
- }
- memset(hp->io_pdir, 0, hp->io_pdir_size);
-
- hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
- }
-
- for (i = 0; i < hp->gatt_entries; i++) {
- hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
- }
-
- return 0;
-}
-
-static int
-hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
-{
- struct _hp_private *hp = &hp_private;
-
- if (hp->io_pdir_owner)
- free_pages((unsigned long) hp->io_pdir,
- get_order(hp->io_pdir_size));
- else
- hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
- return 0;
-}
-
-static int
-hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
-{
- struct _hp_private *hp = &hp_private;
- int i, k;
- off_t j, io_pg_start;
- int io_pg_count;
-
- if (type != mem->type ||
- agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
- return -EINVAL;
- }
-
- io_pg_start = hp->io_pages_per_kpage * pg_start;
- io_pg_count = hp->io_pages_per_kpage * mem->page_count;
- if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
- return -EINVAL;
- }
-
- j = io_pg_start;
- while (j < (io_pg_start + io_pg_count)) {
- if (hp->gatt[j]) {
- return -EBUSY;
- }
- j++;
- }
-
- if (!mem->is_flushed) {
- global_cache_flush();
- mem->is_flushed = true;
- }
-
- for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- unsigned long paddr;
-
- paddr = page_to_phys(mem->pages[i]);
- for (k = 0;
- k < hp->io_pages_per_kpage;
- k++, j++, paddr += hp->io_page_size) {
- hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
- }
- }
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static int
-hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
-{
- struct _hp_private *hp = &hp_private;
- int i, io_pg_start, io_pg_count;
-
- if (type != mem->type ||
- agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
- return -EINVAL;
- }
-
- io_pg_start = hp->io_pages_per_kpage * pg_start;
- io_pg_count = hp->io_pages_per_kpage * mem->page_count;
- for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
- hp->gatt[i] = agp_bridge->scratch_page;
- }
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static unsigned long
-hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
-{
- return HP_ZX1_PDIR_VALID_BIT | addr;
-}
-
-static void
-hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
-{
- struct _hp_private *hp = &hp_private;
- u32 command;
-
- command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
- command = agp_collect_device_status(bridge, mode, command);
- command |= 0x00000100;
-
- writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
-
- agp_device_command(command, (mode & AGP8X_MODE) != 0);
-}
-
-const struct agp_bridge_driver hp_zx1_driver = {
- .owner = THIS_MODULE,
- .size_type = FIXED_APER_SIZE,
- .configure = hp_zx1_configure,
- .fetch_size = hp_zx1_fetch_size,
- .cleanup = hp_zx1_cleanup,
- .tlb_flush = hp_zx1_tlbflush,
- .mask_memory = hp_zx1_mask_memory,
- .masks = hp_zx1_masks,
- .agp_enable = hp_zx1_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = hp_zx1_create_gatt_table,
- .free_gatt_table = hp_zx1_free_gatt_table,
- .insert_memory = hp_zx1_insert_memory,
- .remove_memory = hp_zx1_remove_memory,
- .alloc_by_type = agp_generic_alloc_by_type,
- .free_by_type = agp_generic_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
- .cant_use_aperture = true,
-};
-
-static int __init
-hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
-{
- struct agp_bridge_data *bridge;
- int error = 0;
-
- error = hp_zx1_ioc_init(ioc_hpa);
- if (error)
- goto fail;
-
- error = hp_zx1_lba_init(lba_hpa);
- if (error)
- goto fail;
-
- bridge = agp_alloc_bridge();
- if (!bridge) {
- error = -ENOMEM;
- goto fail;
- }
- bridge->driver = &hp_zx1_driver;
-
- fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
- fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
- bridge->dev = &fake_bridge_dev;
-
- error = agp_add_bridge(bridge);
- fail:
- if (error)
- hp_zx1_cleanup();
- return error;
-}
-
-static acpi_status __init
-zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
-{
- acpi_handle handle, parent;
- acpi_status status;
- struct acpi_device_info *info;
- u64 lba_hpa, sba_hpa, length;
- int match;
-
- status = hp_acpi_csr_space(obj, &lba_hpa, &length);
- if (ACPI_FAILURE(status))
- return AE_OK; /* keep looking for another bridge */
-
- /* Look for an enclosing IOC scope and find its CSR space */
- handle = obj;
- do {
- status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
- /* TBD check _CID also */
- match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
- kfree(info);
- if (match) {
- status = hp_acpi_csr_space(handle, &sba_hpa, &length);
- if (ACPI_SUCCESS(status))
- break;
- else {
- printk(KERN_ERR PFX "Detected HP ZX1 "
- "AGP LBA but no IOC.\n");
- return AE_OK;
- }
- }
- }
-
- status = acpi_get_parent(handle, &parent);
- handle = parent;
- } while (ACPI_SUCCESS(status));
-
- if (ACPI_FAILURE(status))
- return AE_OK; /* found no enclosing IOC */
-
- if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
- return AE_OK;
-
- printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
- "(ioc=%llx, lba=%llx)\n", (char *)context,
- sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
-
- hp_zx1_gart_found = 1;
- return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
-}
-
-static int __init
-agp_hp_init (void)
-{
- if (agp_off)
- return -EINVAL;
-
- acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
- if (hp_zx1_gart_found)
- return 0;
-
- acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
- if (hp_zx1_gart_found)
- return 0;
-
- return -ENODEV;
-}
-
-static void __exit
-agp_hp_cleanup (void)
-{
-}
-
-module_init(agp_hp_init);
-module_exit(agp_hp_cleanup);
-
-MODULE_LICENSE("GPL and additional rights");
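
The deleted hp-agp.c driver carves the upper half of the 1 GB ZX1 IOVA window out as the GART and points each kernel page at io_pages_per_kpage consecutive PDIR entries via HP_ZX1_IOVA_TO_PDIR(). A minimal user-space sketch of that index arithmetic follows; the shift, base and physical-address values are hard-coded purely for illustration, and a 64 KiB kernel page size is assumed.

/* Illustration only: mirrors HP_ZX1_IOVA_TO_PDIR() and the insert loop above. */
#include <stdio.h>
#include <stdint.h>

#define GB(x)            ((x) << 30)
#define IOVA_BASE        GB(1UL)             /* HP_ZX1_IOVA_BASE */
#define IOVA_SIZE        GB(1UL)             /* HP_ZX1_IOVA_SIZE */
#define GART_SIZE        (IOVA_SIZE / 2)     /* HP_ZX1_GART_SIZE */
#define PDIR_VALID_BIT   0x8000000000000000UL

int main(void)
{
        unsigned int io_tlb_shift = 12;                /* 4 KiB IO pages (io_tlb_ps == 0) */
        unsigned long io_page_size = 1UL << io_tlb_shift;
        unsigned long gart_base = IOVA_BASE + IOVA_SIZE - GART_SIZE;
        unsigned long kernel_page_size = 1UL << 16;    /* assumed 64 KiB kernel pages */
        unsigned long io_pages_per_kpage = kernel_page_size / io_page_size;

        /* IOVA-to-PDIR index, as in HP_ZX1_IOVA_TO_PDIR(va) */
        unsigned long va = gart_base + 3 * kernel_page_size;
        unsigned long pdir_idx = (va - IOVA_BASE) >> io_tlb_shift;

        /* One kernel page occupies io_pages_per_kpage consecutive PDIR slots,
         * each entry holding the valid bit OR'd with the IO-page physical address. */
        uint64_t paddr = 0x123450000ULL;               /* made-up physical address */

        printf("GART base 0x%lx, first PDIR index %lu, %lu entries per kernel page\n",
               gart_base, pdir_idx, io_pages_per_kpage);
        for (unsigned long k = 0; k < io_pages_per_kpage; k++)
                printf("  pdir[%lu] = 0x%016llx\n", pdir_idx + k,
                       (unsigned long long)(PDIR_VALID_BIT | (paddr + k * io_page_size)));
        return 0;
}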
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
deleted file mode 100644
index 15b240ea48..0000000000
--- a/drivers/char/agp/i460-agp.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
- * the "Intel 460GTX Chipset Software Developer's Manual":
- * http://www.intel.com/design/archives/itanium/downloads/248704.htm
- */
-/*
- * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
- * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/agp_backend.h>
-#include <linux/log2.h>
-
-#include "agp.h"
-
-#define INTEL_I460_BAPBASE 0x98
-#define INTEL_I460_GXBCTL 0xa0
-#define INTEL_I460_AGPSIZ 0xa2
-#define INTEL_I460_ATTBASE 0xfe200000
-#define INTEL_I460_GATT_VALID (1UL << 24)
-#define INTEL_I460_GATT_COHERENT (1UL << 25)
-
-/*
- * The i460 can operate with large (4MB) pages, but there is no sane way to support this
- * within the current kernel/DRM environment, so we disable the relevant code for now.
- * See also comments in ia64_alloc_page()...
- */
-#define I460_LARGE_IO_PAGES 0
-
-#if I460_LARGE_IO_PAGES
-# define I460_IO_PAGE_SHIFT i460.io_page_shift
-#else
-# define I460_IO_PAGE_SHIFT 12
-#endif
-
-#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
-#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
-#define I460_SRAM_IO_DISABLE (1 << 4)
-#define I460_BAPBASE_ENABLE (1 << 3)
-#define I460_AGPSIZ_MASK 0x7
-#define I460_4M_PS (1 << 1)
-
-/* Control bits for Out-Of-GART coherency and Burst Write Combining */
-#define I460_GXBCTL_OOG (1UL << 0)
-#define I460_GXBCTL_BWC (1UL << 2)
-
-/*
- * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
- * gatt_table and gatt_table_real pointers a "void *"...
- */
-#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
-#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
-/*
- * The 460 spec says we have to read the last location written to make sure that all
- * writes have taken effect
- */
-#define WR_FLUSH_GATT(index) RD_GATT(index)
-
-static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- dma_addr_t addr, int type);
-
-static struct {
- void *gatt; /* ioremap'd GATT area */
-
- /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
- u8 io_page_shift;
-
- /* BIOS configures chipset to one of 2 possible apbase values: */
- u8 dynamic_apbase;
-
- /* structure for tracking partial use of 4MB GART pages: */
- struct lp_desc {
- unsigned long *alloced_map; /* bitmap of kernel-pages in use */
- int refcount; /* number of kernel pages using the large page */
- u64 paddr; /* physical address of large page */
- struct page *page; /* page pointer */
- } *lp_desc;
-} i460;
-
-static const struct aper_size_info_8 i460_sizes[3] =
-{
- /*
- * The 32GB aperture is only available with a 4M GART page size. Due to the
- * dynamic GART page size, we can't figure out page_order or num_entries until
- * runtime.
- */
- {32768, 0, 0, 4},
- {1024, 0, 0, 2},
- {256, 0, 0, 1}
-};
-
-static struct gatt_mask i460_masks[] =
-{
- {
- .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
- .type = 0
- }
-};
-
-static int i460_fetch_size (void)
-{
- int i;
- u8 temp;
- struct aper_size_info_8 *values;
-
- /* Determine the GART page size */
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
- i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
- pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
-
- if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
- printk(KERN_ERR PFX
- "I/O (GART) page-size %luKB doesn't match expected "
- "size %luKB\n",
- 1UL << (i460.io_page_shift - 10),
- 1UL << (I460_IO_PAGE_SHIFT));
- return 0;
- }
-
- values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
-
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
-
- /* Exit now if the IO drivers for the GART SRAMS are turned off */
- if (temp & I460_SRAM_IO_DISABLE) {
- printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
- printk(KERN_ERR PFX "AGPGART operation not possible\n");
- return 0;
- }
-
- /* Make sure we don't try to create a 2^23-entry GATT */
- if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
- printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
- return 0;
- }
-
- /* Determine the proper APBASE register */
- if (temp & I460_BAPBASE_ENABLE)
- i460.dynamic_apbase = INTEL_I460_BAPBASE;
- else
- i460.dynamic_apbase = AGP_APBASE;
-
- for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
- /*
- * Dynamically calculate the proper num_entries and page_order values for
- * the defined aperture sizes. Take care not to shift off the end of
- * values[i].size.
- */
- values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
- values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
- }
-
- for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
- /* Neglect control bits when matching up size_value */
- if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
- agp_bridge->aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-/* There isn't anything to do here since 460 has no GART TLB. */
-static void i460_tlb_flush (struct agp_memory *mem)
-{
- return;
-}
-
-/*
- * This utility function is needed to prevent corruption of the control bits
- * which are stored along with the aperture size in 460's AGPSIZ register
- */
-static void i460_write_agpsiz (u8 size_value)
-{
- u8 temp;
-
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
- pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
- ((temp & ~I460_AGPSIZ_MASK) | size_value));
-}
-
-static void i460_cleanup (void)
-{
- struct aper_size_info_8 *previous_size;
-
- previous_size = A_SIZE_8(agp_bridge->previous_size);
- i460_write_agpsiz(previous_size->size_value);
-
- if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
- kfree(i460.lp_desc);
-}
-
-static int i460_configure (void)
-{
- union {
- u32 small[2];
- u64 large;
- } temp;
- size_t size;
- u8 scratch;
- struct aper_size_info_8 *current_size;
-
- temp.large = 0;
-
- current_size = A_SIZE_8(agp_bridge->current_size);
- i460_write_agpsiz(current_size->size_value);
-
- /*
- * Do the necessary rigmarole to read all eight bytes of APBASE.
- * This has to be done since the AGP aperture can be above 4GB on
- * 460 based systems.
- */
- pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
- pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
-
- /* Clear BAR control bits */
- agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
-
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
- pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
- (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
-
- /*
- * Initialize partial allocation trackers if a GART page is bigger than a kernel
- * page.
- */
- if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
- size = current_size->num_entries * sizeof(i460.lp_desc[0]);
- i460.lp_desc = kzalloc(size, GFP_KERNEL);
- if (!i460.lp_desc)
- return -ENOMEM;
- }
- return 0;
-}
-
-static int i460_create_gatt_table (struct agp_bridge_data *bridge)
-{
- int page_order, num_entries, i;
- void *temp;
-
- /*
- * Load up the fixed address of the GART SRAMS which hold our GATT table.
- */
- temp = agp_bridge->current_size;
- page_order = A_SIZE_8(temp)->page_order;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
- if (!i460.gatt) {
- printk(KERN_ERR PFX "ioremap failed\n");
- return -ENOMEM;
- }
-
- /* These are no good; they should be removed from the agp_bridge structure... */
- agp_bridge->gatt_table_real = NULL;
- agp_bridge->gatt_table = NULL;
- agp_bridge->gatt_bus_addr = 0;
-
- for (i = 0; i < num_entries; ++i)
- WR_GATT(i, 0);
- WR_FLUSH_GATT(i - 1);
- return 0;
-}
-
-static int i460_free_gatt_table (struct agp_bridge_data *bridge)
-{
- int num_entries, i;
- void *temp;
-
- temp = agp_bridge->current_size;
-
- num_entries = A_SIZE_8(temp)->num_entries;
-
- for (i = 0; i < num_entries; ++i)
- WR_GATT(i, 0);
- WR_FLUSH_GATT(num_entries - 1);
-
- iounmap(i460.gatt);
- return 0;
-}
-
-/*
- * The following functions are called when the I/O (GART) page size is smaller than
- * PAGE_SIZE.
- */
-
-static int i460_insert_memory_small_io_page (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- unsigned long paddr, io_pg_start, io_page_size;
- int i, j, k, num_entries;
- void *temp;
-
- pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
- mem, pg_start, type, page_to_phys(mem->pages[0]));
-
- if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
- return -EINVAL;
-
- io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
- printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
- return -EINVAL;
- }
-
- j = io_pg_start;
- while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
- if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
- pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
- j, RD_GATT(j));
- return -EBUSY;
- }
- j++;
- }
-
- io_page_size = 1UL << I460_IO_PAGE_SHIFT;
- for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- paddr = page_to_phys(mem->pages[i]);
- for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
- WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
- }
- WR_FLUSH_GATT(j - 1);
- return 0;
-}
-
-static int i460_remove_memory_small_io_page(struct agp_memory *mem,
- off_t pg_start, int type)
-{
- int i;
-
- pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
- mem, pg_start, type);
-
- pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
-
- for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
- WR_GATT(i, 0);
- WR_FLUSH_GATT(i - 1);
- return 0;
-}
-
-#if I460_LARGE_IO_PAGES
-
-/*
- * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
- *
- * This situation is interesting since AGP memory allocations that are smaller than a
- * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
- * large GART pages to work around this issue.
- *
- * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
- * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
- * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
- */
-
-static int i460_alloc_large_page (struct lp_desc *lp)
-{
- unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
- size_t map_size;
-
- lp->page = alloc_pages(GFP_KERNEL, order);
- if (!lp->page) {
- printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
- return -ENOMEM;
- }
-
- map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
- lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
- if (!lp->alloced_map) {
- __free_pages(lp->page, order);
- printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
- return -ENOMEM;
- }
-
- lp->paddr = page_to_phys(lp->page);
- lp->refcount = 0;
- atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
- return 0;
-}
-
-static void i460_free_large_page (struct lp_desc *lp)
-{
- kfree(lp->alloced_map);
- lp->alloced_map = NULL;
-
- __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
- atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
-}
-
-static int i460_insert_memory_large_io_page (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- int i, start_offset, end_offset, idx, pg, num_entries;
- struct lp_desc *start, *end, *lp;
- void *temp;
-
- if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
- return -EINVAL;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- /* Figure out what pg_start means in terms of our large GART pages */
- start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
- end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
- start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
- end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
-
- if (end > i460.lp_desc + num_entries) {
- printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
- return -EINVAL;
- }
-
- /* Check if the requested region of the aperture is free */
- for (lp = start; lp <= end; ++lp) {
- if (!lp->alloced_map)
- continue; /* OK, the entire large page is available... */
-
- for (idx = ((lp == start) ? start_offset : 0);
- idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
- idx++)
- {
- if (test_bit(idx, lp->alloced_map))
- return -EBUSY;
- }
- }
-
- for (lp = start, i = 0; lp <= end; ++lp) {
- if (!lp->alloced_map) {
- /* Allocate new GART pages... */
- if (i460_alloc_large_page(lp) < 0)
- return -ENOMEM;
- pg = lp - i460.lp_desc;
- WR_GATT(pg, i460_mask_memory(agp_bridge,
- lp->paddr, 0));
- WR_FLUSH_GATT(pg);
- }
-
- for (idx = ((lp == start) ? start_offset : 0);
- idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
- idx++, i++)
- {
- mem->pages[i] = lp->page;
- __set_bit(idx, lp->alloced_map);
- ++lp->refcount;
- }
- }
- return 0;
-}
-
-static int i460_remove_memory_large_io_page (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- int i, pg, start_offset, end_offset, idx, num_entries;
- struct lp_desc *start, *end, *lp;
- void *temp;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- /* Figure out what pg_start means in terms of our large GART pages */
- start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
- end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
- start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
- end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
-
- for (i = 0, lp = start; lp <= end; ++lp) {
- for (idx = ((lp == start) ? start_offset : 0);
- idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
- idx++, i++)
- {
- mem->pages[i] = NULL;
- __clear_bit(idx, lp->alloced_map);
- --lp->refcount;
- }
-
- /* Free GART pages if they are unused */
- if (lp->refcount == 0) {
- pg = lp - i460.lp_desc;
- WR_GATT(pg, 0);
- WR_FLUSH_GATT(pg);
- i460_free_large_page(lp);
- }
- }
- return 0;
-}
-
-/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
-
-static int i460_insert_memory (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
- return i460_insert_memory_small_io_page(mem, pg_start, type);
- else
- return i460_insert_memory_large_io_page(mem, pg_start, type);
-}
-
-static int i460_remove_memory (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
- return i460_remove_memory_small_io_page(mem, pg_start, type);
- else
- return i460_remove_memory_large_io_page(mem, pg_start, type);
-}
-
-/*
- * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
- * allocate memory until we know where it is to be bound in the aperture (a
- * multi-kernel-page alloc might fit inside of an already allocated GART page).
- *
- * Let's just hope nobody counts on the allocated AGP memory being there before bind time
- * (I don't think current drivers do)...
- */
-static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
-{
- void *page;
-
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
- page = agp_generic_alloc_page(agp_bridge);
- } else
- /* Returning NULL would cause problems */
- /* AK: really dubious code. */
- page = (void *)~0UL;
- return page;
-}
-
-static void i460_destroy_page (struct page *page, int flags)
-{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
- agp_generic_destroy_page(page, flags);
- }
-}
-
-#endif /* I460_LARGE_IO_PAGES */
-
-static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Make sure the returned address is a valid GATT entry */
- return bridge->driver->masks[0].mask
- | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
-}
-
-const struct agp_bridge_driver intel_i460_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = i460_sizes,
- .size_type = U8_APER_SIZE,
- .num_aperture_sizes = 3,
- .configure = i460_configure,
- .fetch_size = i460_fetch_size,
- .cleanup = i460_cleanup,
- .tlb_flush = i460_tlb_flush,
- .mask_memory = i460_mask_memory,
- .masks = i460_masks,
- .agp_enable = agp_generic_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = i460_create_gatt_table,
- .free_gatt_table = i460_free_gatt_table,
-#if I460_LARGE_IO_PAGES
- .insert_memory = i460_insert_memory,
- .remove_memory = i460_remove_memory,
- .agp_alloc_page = i460_alloc_page,
- .agp_destroy_page = i460_destroy_page,
-#else
- .insert_memory = i460_insert_memory_small_io_page,
- .remove_memory = i460_remove_memory_small_io_page,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
-#endif
- .alloc_by_type = agp_generic_alloc_by_type,
- .free_by_type = agp_generic_free_by_type,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
- .cant_use_aperture = true,
-};
-
-static int agp_intel_i460_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct agp_bridge_data *bridge;
- u8 cap_ptr;
-
- cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
- if (!cap_ptr)
- return -ENODEV;
-
- bridge = agp_alloc_bridge();
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver = &intel_i460_driver;
- bridge->dev = pdev;
- bridge->capndx = cap_ptr;
-
- printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
-
- pci_set_drvdata(pdev, bridge);
- return agp_add_bridge(bridge);
-}
-
-static void agp_intel_i460_remove(struct pci_dev *pdev)
-{
- struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-
- agp_remove_bridge(bridge);
- agp_put_bridge(bridge);
-}
-
-static struct pci_device_id agp_intel_i460_pci_table[] = {
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_84460GX,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
-
-static struct pci_driver agp_intel_i460_pci_driver = {
- .name = "agpgart-intel-i460",
- .id_table = agp_intel_i460_pci_table,
- .probe = agp_intel_i460_probe,
- .remove = agp_intel_i460_remove,
-};
-
-static int __init agp_intel_i460_init(void)
-{
- if (agp_off)
- return -EINVAL;
- return pci_register_driver(&agp_intel_i460_pci_driver);
-}
-
-static void __exit agp_intel_i460_cleanup(void)
-{
- pci_unregister_driver(&agp_intel_i460_pci_driver);
-}
-
-module_init(agp_intel_i460_init);
-module_exit(agp_intel_i460_cleanup);
-
-MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
-MODULE_LICENSE("GPL and additional rights");
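
i460_fetch_size() above derives num_entries and page_order for each aperture entry from the aperture size and the GART page shift read back from GXBCTL. A stand-alone sketch of that arithmetic, restricted to the 4 KiB GART-page configuration the driver actually builds with (I460_LARGE_IO_PAGES == 0) and assuming PAGE_SHIFT == 12 purely for illustration:

/* Illustration only: reproduces the num_entries/page_order math from i460_fetch_size(). */
#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumed 4 KiB kernel pages */

static int ilog2_u(unsigned long v)      /* minimal stand-in for the kernel's ilog2() */
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        /* Aperture sizes in MB, as in i460_sizes[]: 32 GB, 1 GB, 256 MB */
        unsigned long sizes_mb[] = { 32768, 1024, 256 };
        unsigned int io_page_shift = 12; /* 4 KiB GART pages */

        for (int i = 0; i < 3; i++) {
                unsigned long num_entries = (sizes_mb[i] << 8) >> (io_page_shift - 12);
                int page_order = ilog2_u((sizeof(unsigned int) * num_entries) >> PAGE_SHIFT);

                printf("%5lu MB aperture: %8lu GATT entries, GATT ioremap order %d\n",
                       sizes_mb[i], num_entries, page_order);
        }
        return 0;
}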
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ee71376f17..9c90b1d2c0 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -64,25 +64,6 @@
static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
-/* This clocksource driver currently only works on ia64 */
-#ifdef CONFIG_IA64
-static void __iomem *hpet_mctr;
-
-static u64 read_hpet(struct clocksource *cs)
-{
- return (u64)read_counter((void __iomem *)hpet_mctr);
-}
-
-static struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-static struct clocksource *hpet_clocksource;
-#endif
-
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
@@ -111,7 +92,7 @@ struct hpets {
unsigned long hp_delta;
unsigned int hp_ntimer;
unsigned int hp_which;
- struct hpet_dev hp_dev[];
+ struct hpet_dev hp_dev[] __counted_by(hp_ntimer);
};
static struct hpets *hpets;
@@ -728,7 +709,6 @@ static struct ctl_table hpet_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {}
};
static struct ctl_table_header *sysctl_header;
@@ -907,17 +887,6 @@ int hpet_alloc(struct hpet_data *hdp)
hpetp->hp_delta = hpet_calibrate(hpetp);
-/* This clocksource driver currently only works on ia64 */
-#ifdef CONFIG_IA64
- if (!hpet_clocksource) {
- hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
- clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
- clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
- hpetp->hp_clocksource = &clocksource_hpet;
- hpet_clocksource = &clocksource_hpet;
- }
-#endif
-
return 0;
}
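
Besides dropping the ia64-only clocksource and the sysctl sentinel entry, the hpet change annotates the hp_dev[] flexible array with __counted_by(hp_ntimer), which lets the compiler and fortified accessors bounds-check indexing against that field; the count has to be valid before the array is used. A hedged stand-alone sketch of the idiom (struct and function names are illustrative, and the fallback macro only covers toolchains without the attribute):

#include <stdlib.h>

/* Fallback so this builds on compilers without the counted_by attribute. */
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#if __has_attribute(__counted_by__)
#define __counted_by(member)    __attribute__((__counted_by__(member)))
#else
#define __counted_by(member)
#endif

struct timer_dev {
        int irq;
};

struct timers {
        unsigned int nr_timers;
        /* The attribute ties the array length to nr_timers for bounds checking. */
        struct timer_dev dev[] __counted_by(nr_timers);
};

static struct timers *timers_alloc(unsigned int n)
{
        struct timers *t = malloc(sizeof(*t) + n * sizeof(t->dev[0]));

        if (!t)
                return NULL;
        /* The count must be valid before dev[] is indexed. */
        t->nr_timers = n;
        return t;
}

int main(void)
{
        struct timers *t = timers_alloc(4);

        if (t)
                t->dev[0].irq = 10;     /* in bounds: index 0 < nr_timers */
        free(t);
        return 0;
}

With bounds instrumentation enabled (FORTIFY_SOURCE / UBSAN bounds checks in the kernel build), out-of-range indexing of a __counted_by array can then be caught at run time rather than silently corrupting memory.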
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8de74dcfa1..442c40efb2 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
- depends on (X86 || IA64 || COMPILE_TEST) && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 4c08efe7f3..b03e803006 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -149,8 +149,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
-
/* map peripheral */
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index 96438f85ca..b6f27566e0 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -79,8 +79,6 @@ static int hisi_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
- platform_set_drvdata(pdev, rng);
-
rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index e4b385b01b..118a72acb9 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -51,8 +51,8 @@
#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
-#define RNGC_TIMEOUT 3000 /* 3 sec */
-
+#define RNGC_SELFTEST_TIMEOUT 2500 /* us */
+#define RNGC_SEED_TIMEOUT 200 /* ms */
static bool self_test = true;
module_param(self_test, bool, 0);
@@ -110,7 +110,8 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
+ ret = wait_for_completion_timeout(&rngc->rng_op_done,
+ usecs_to_jiffies(RNGC_SELFTEST_TIMEOUT));
imx_rngc_irq_mask_clear(rngc);
if (!ret)
return -ETIMEDOUT;
@@ -182,7 +183,8 @@ static int imx_rngc_init(struct hwrng *rng)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
+ ret = wait_for_completion_timeout(&rngc->rng_op_done,
+ msecs_to_jiffies(RNGC_SEED_TIMEOUT));
if (!ret) {
ret = -ETIMEDOUT;
goto err;
diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c
index 38474d48a2..b1f94e3c0c 100644
--- a/drivers/char/hw_random/jh7110-trng.c
+++ b/drivers/char/hw_random/jh7110-trng.c
@@ -300,7 +300,7 @@ static int starfive_trng_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name,
(void *)trng);
if (ret)
- return dev_err_probe(&pdev->dev, irq,
+ return dev_err_probe(&pdev->dev, ret,
"Failed to register interrupt handler\n");
trng->hclk = devm_clk_get(&pdev->dev, "hclk");
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 2f2f21f1b6..dff7b9db70 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -81,7 +81,6 @@ struct trng_regs {
};
struct ks_sa_rng {
- struct device *dev;
struct hwrng rng;
struct clk *clk;
struct regmap *regmap_cfg;
@@ -113,8 +112,7 @@ static unsigned int refill_delay_ns(unsigned long clk_rate)
static int ks_sa_rng_init(struct hwrng *rng)
{
u32 value;
- struct device *dev = (struct device *)rng->priv;
- struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk);
/* Enable RNG module */
@@ -153,8 +151,7 @@ static int ks_sa_rng_init(struct hwrng *rng)
static void ks_sa_rng_cleanup(struct hwrng *rng)
{
- struct device *dev = (struct device *)rng->priv;
- struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
/* Disable RNG */
writel(0, &ks_sa_rng->reg_rng->control);
@@ -164,8 +161,7 @@ static void ks_sa_rng_cleanup(struct hwrng *rng)
static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
{
- struct device *dev = (struct device *)rng->priv;
- struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
/* Read random data */
data[0] = readl(&ks_sa_rng->reg_rng->output_l);
@@ -179,8 +175,7 @@ static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
{
- struct device *dev = (struct device *)rng->priv;
- struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ struct ks_sa_rng *ks_sa_rng = container_of(rng, struct ks_sa_rng, rng);
u64 now = ktime_get_ns();
u32 ready;
@@ -217,7 +212,6 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (!ks_sa_rng)
return -ENOMEM;
- ks_sa_rng->dev = dev;
ks_sa_rng->rng = (struct hwrng) {
.name = "ks_sa_hwrng",
.init = ks_sa_rng_init,
@@ -225,7 +219,6 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
.data_present = ks_sa_rng_data_present,
.cleanup = ks_sa_rng_cleanup,
};
- ks_sa_rng->rng.priv = (unsigned long)dev;
ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks_sa_rng->reg_rng))
@@ -235,21 +228,16 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
syscon_regmap_lookup_by_phandle(dev->of_node,
"ti,syscon-sa-cfg");
- if (IS_ERR(ks_sa_rng->regmap_cfg)) {
- dev_err(dev, "syscon_node_to_regmap failed\n");
- return -EINVAL;
- }
+ if (IS_ERR(ks_sa_rng->regmap_cfg))
+ return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- dev_err(dev, "Failed to enable SA power-domain\n");
pm_runtime_disable(dev);
- return ret;
+ return dev_err_probe(dev, ret, "Failed to enable SA power-domain\n");
}
- platform_set_drvdata(pdev, ks_sa_rng);
-
return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
}
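
The ks-sa-rng change drops the rng->priv / dev_get_drvdata() round trip and recovers the driver state with container_of() on the embedded struct hwrng. A user-space sketch of that pattern (container_of spelled out with offsetof; the struct names merely mirror the driver):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: member address minus its offset in the wrapper. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hwrng {
        const char *name;
};

struct ks_sa_rng {
        struct hwrng rng;       /* embedded member handed to the hwrng core */
        int init_count;         /* driver-private state */
};

/* The callback only receives the embedded hwrng, just like the real ->init(). */
static int ks_sa_rng_init(struct hwrng *rng)
{
        struct ks_sa_rng *priv = container_of(rng, struct ks_sa_rng, rng);

        priv->init_count++;
        return 0;
}

int main(void)
{
        struct ks_sa_rng dev = { .rng = { .name = "ks_sa_hwrng" }, .init_count = 0 };

        ks_sa_rng_init(&dev.rng);
        printf("%s initialised %d time(s)\n", dev.rng.name, dev.init_count);
        return 0;
}

Because the private structure is recovered directly from the embedded member, the probe path no longer needs platform_set_drvdata(), which is exactly what the hunk above deletes.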
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index a4eb8e35f1..75225eb9fe 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -13,12 +13,23 @@
#include <linux/types.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/iopoll.h>
-#define RNG_DATA 0x00
+#define RNG_DATA 0x00
+#define RNG_S4_DATA 0x08
+#define RNG_S4_CFG 0x00
+
+#define RUN_BIT BIT(0)
+#define SEED_READY_STS_BIT BIT(31)
+
+struct meson_rng_priv {
+ int (*read)(struct hwrng *rng, void *buf, size_t max, bool wait);
+};
struct meson_rng_data {
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
};
static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -31,16 +42,62 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
return sizeof(u32);
}
+static int meson_rng_wait_status(void __iomem *cfg_addr, int bit)
+{
+ u32 status = 0;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout_atomic(cfg_addr,
+ status, !(status & bit),
+ 10, 10000);
+ if (ret)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int meson_s4_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct meson_rng_data *data =
+ container_of(rng, struct meson_rng_data, rng);
+
+ void __iomem *cfg_addr = data->base + RNG_S4_CFG;
+ int err;
+
+ writel_relaxed(readl_relaxed(cfg_addr) | SEED_READY_STS_BIT, cfg_addr);
+
+ err = meson_rng_wait_status(cfg_addr, SEED_READY_STS_BIT);
+ if (err) {
+ dev_err(data->dev, "Seed isn't ready, try again\n");
+ return err;
+ }
+
+ err = meson_rng_wait_status(cfg_addr, RUN_BIT);
+ if (err) {
+ dev_err(data->dev, "Can't get random number, try again\n");
+ return err;
+ }
+
+ *(u32 *)buf = readl_relaxed(data->base + RNG_S4_DATA);
+
+ return sizeof(u32);
+}
+
static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
struct clk *core_clk;
+ const struct meson_rng_priv *priv;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ priv = device_get_match_data(&pdev->dev);
+ if (!priv)
+ return -ENODEV;
+
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
@@ -51,13 +108,30 @@ static int meson_rng_probe(struct platform_device *pdev)
"Failed to get core clock\n");
data->rng.name = pdev->name;
- data->rng.read = meson_rng_read;
+ data->rng.read = priv->read;
+
+ data->dev = &pdev->dev;
return devm_hwrng_register(dev, &data->rng);
}
+static const struct meson_rng_priv meson_rng_priv = {
+ .read = meson_rng_read,
+};
+
+static const struct meson_rng_priv meson_rng_priv_s4 = {
+ .read = meson_s4_rng_read,
+};
+
static const struct of_device_id meson_rng_of_match[] = {
- { .compatible = "amlogic,meson-rng", },
+ {
+ .compatible = "amlogic,meson-rng",
+ .data = (void *)&meson_rng_priv,
+ },
+ {
+ .compatible = "amlogic,meson-s4-rng",
+ .data = (void *)&meson_rng_priv_s4,
+ },
{},
};
MODULE_DEVICE_TABLE(of, meson_rng_of_match);
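
The meson-rng rework selects the read callback through per-compatible match data instead of hard-coding meson_rng_read(), which is how the S4 variant plugs in its polled read path. A user-space sketch of that dispatch pattern, where match_variant() stands in for device_get_match_data() and all names and values are illustrative:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Per-variant operations, analogous to struct meson_rng_priv::read. */
struct rng_variant {
        const char *compatible;
        int (*read)(unsigned int *out);
};

static int classic_read(unsigned int *out)
{
        *out = 0x12345678;      /* stand-in for reading RNG_DATA */
        return 0;
}

static int s4_read(unsigned int *out)
{
        /* The real S4 path polls the SEED_READY/RUN status bits before RNG_S4_DATA. */
        *out = 0x9abcdef0;
        return 0;
}

/* Table playing the role of meson_rng_of_match[] with its .data pointers. */
static const struct rng_variant variants[] = {
        { "amlogic,meson-rng",    classic_read },
        { "amlogic,meson-s4-rng", s4_read },
};

/* Stand-in for device_get_match_data(): pick the ops for the probed compatible. */
static const struct rng_variant *match_variant(const char *compatible)
{
        for (size_t i = 0; i < sizeof(variants) / sizeof(variants[0]); i++)
                if (!strcmp(variants[i].compatible, compatible))
                        return &variants[i];
        return NULL;
}

int main(void)
{
        const struct rng_variant *v = match_variant("amlogic,meson-s4-rng");
        unsigned int val;

        if (!v)
                return 1;       /* mirrors the -ENODEV bail-out in probe */
        v->read(&val);
        printf("%s -> 0x%08x\n", v->compatible, val);
        return 0;
}

The n2-drv hunk later in this diff switches to the same device_get_match_data() idiom in place of an explicit of_match_device() lookup.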
diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c
index c6972734ae..0994024daa 100644
--- a/drivers/char/hw_random/mpfs-rng.c
+++ b/drivers/char/hw_random/mpfs-rng.c
@@ -79,8 +79,6 @@ static int mpfs_rng_probe(struct platform_device *pdev)
rng_priv->rng.read = mpfs_rng_read;
rng_priv->rng.name = pdev->name;
- platform_set_drvdata(pdev, rng_priv);
-
ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to register MPFS hwrng\n");
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 73e4081464..aaae16b984 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -14,7 +14,8 @@
#include <linux/hw_random.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/hypervisor.h>
@@ -695,20 +696,15 @@ static void n2rng_driver_version(void)
static const struct of_device_id n2rng_match[];
static int n2rng_probe(struct platform_device *op)
{
- const struct of_device_id *match;
int err = -ENOMEM;
struct n2rng *np;
- match = of_match_device(n2rng_match, &op->dev);
- if (!match)
- return -EINVAL;
-
n2rng_driver_version();
np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL);
if (!np)
goto out;
np->op = op;
- np->data = (struct n2rng_template *)match->data;
+ np->data = (struct n2rng_template *)device_get_match_data(&op->dev);
INIT_DELAYED_WORK(&np->work, n2rng_work);
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 8c6a40d6ce..a2009fc4ad 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -88,4 +88,5 @@ static struct amba_driver nmk_rng_driver = {
module_amba_driver(nmk_rng_driver);
+MODULE_DESCRIPTION("ST-Ericsson Nomadik Random Number Generator");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 8561a09b46..412f544050 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng)
ctl.u64 = 0;
ctl.s.ent_en = 1; /* Enable the entropy source. */
ctl.s.rng_en = 1; /* Enable the RNG hardware. */
- cvmx_write_csr((__force u64)p->control_status, ctl.u64);
+ cvmx_write_csr((unsigned long)p->control_status, ctl.u64);
return 0;
}
@@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng)
ctl.u64 = 0;
/* Disable everything. */
- cvmx_write_csr((__force u64)p->control_status, ctl.u64);
+ cvmx_write_csr((unsigned long)p->control_status, ctl.u64);
}
static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
{
struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
- *data = cvmx_read64_uint32((__force u64)p->result);
+ *data = cvmx_read64_uint32((unsigned long)p->result);
return sizeof(u32);
}
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 6e9dfac9fc..23749817d8 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -121,4 +121,5 @@ static struct platform_driver st_rng_driver = {
module_platform_driver(st_rng_driver);
MODULE_AUTHOR("Pankaj Dev <pankaj.dev@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics HW Random Number Generator");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index efb6a9f9a1..efd6edcd70 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -17,60 +17,234 @@
#include <linux/reset.h>
#include <linux/slab.h>
-#define RNG_CR 0x00
-#define RNG_CR_RNGEN BIT(2)
-#define RNG_CR_CED BIT(5)
-
-#define RNG_SR 0x04
-#define RNG_SR_SEIS BIT(6)
-#define RNG_SR_CEIS BIT(5)
-#define RNG_SR_DRDY BIT(0)
+#define RNG_CR 0x00
+#define RNG_CR_RNGEN BIT(2)
+#define RNG_CR_CED BIT(5)
+#define RNG_CR_CONFIG1 GENMASK(11, 8)
+#define RNG_CR_NISTC BIT(12)
+#define RNG_CR_CONFIG2 GENMASK(15, 13)
+#define RNG_CR_CLKDIV_SHIFT 16
+#define RNG_CR_CLKDIV GENMASK(19, 16)
+#define RNG_CR_CONFIG3 GENMASK(25, 20)
+#define RNG_CR_CONDRST BIT(30)
+#define RNG_CR_CONFLOCK BIT(31)
+#define RNG_CR_ENTROPY_SRC_MASK (RNG_CR_CONFIG1 | RNG_CR_NISTC | RNG_CR_CONFIG2 | RNG_CR_CONFIG3)
+#define RNG_CR_CONFIG_MASK (RNG_CR_ENTROPY_SRC_MASK | RNG_CR_CED | RNG_CR_CLKDIV)
+
+#define RNG_SR 0x04
+#define RNG_SR_DRDY BIT(0)
+#define RNG_SR_CECS BIT(1)
+#define RNG_SR_SECS BIT(2)
+#define RNG_SR_CEIS BIT(5)
+#define RNG_SR_SEIS BIT(6)
+
+#define RNG_DR 0x08
+
+#define RNG_NSCR 0x0C
+#define RNG_NSCR_MASK GENMASK(17, 0)
+
+#define RNG_HTCR 0x10
+
+#define RNG_NB_RECOVER_TRIES 3
+
+struct stm32_rng_data {
+ uint max_clock_rate;
+ u32 cr;
+ u32 nscr;
+ u32 htcr;
+ bool has_cond_reset;
+};
-#define RNG_DR 0x08
+/**
+ * struct stm32_rng_config - RNG configuration data
+ *
+ * @cr: RNG configuration. 0 means default hardware RNG configuration
+ * @nscr: Noise sources control configuration.
+ * @htcr: Health tests configuration.
+ */
+struct stm32_rng_config {
+ u32 cr;
+ u32 nscr;
+ u32 htcr;
+};
struct stm32_rng_private {
struct hwrng rng;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
+ struct stm32_rng_config pm_conf;
+ const struct stm32_rng_data *data;
bool ced;
+ bool lock_conf;
};
+/*
+ * Extracts from the STM32 RNG specification when RNG supports CONDRST.
+ *
+ * When a noise source (or seed) error occurs, the RNG stops generating
+ * random numbers and sets to “1” both SEIS and SECS bits to indicate
+ * that a seed error occurred. (...)
+ *
+ * 1. Software reset by writing CONDRST at 1 and at 0 (see bitfield
+ * description for details). This step is needed only if SECS is set.
+ * Indeed, when SEIS is set and SECS is cleared it means RNG performed
+ * the reset automatically (auto-reset).
+ * 2. If SECS was set in step 1 (no auto-reset) wait for CONDRST
+ * to be cleared in the RNG_CR register, then confirm that SEIS is
+ * cleared in the RNG_SR register. Otherwise just clear SEIS bit in
+ * the RNG_SR register.
+ * 3. If SECS was set in step 1 (no auto-reset) wait for SECS to be
+ * cleared by RNG. The random number generation is now back to normal.
+ */
+static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv)
+{
+ struct device *dev = (struct device *)priv->rng.priv;
+ u32 sr = readl_relaxed(priv->base + RNG_SR);
+ u32 cr = readl_relaxed(priv->base + RNG_CR);
+ int err;
+
+ if (sr & RNG_SR_SECS) {
+ /* Conceal by resetting the subsystem (step 1.) */
+ writel_relaxed(cr | RNG_CR_CONDRST, priv->base + RNG_CR);
+ writel_relaxed(cr & ~RNG_CR_CONDRST, priv->base + RNG_CR);
+ } else {
+ /* RNG auto-reset (step 2.) */
+ writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR);
+ goto end;
+ }
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, cr, !(cr & RNG_CR_CONDRST), 10,
+ 100000);
+ if (err) {
+ dev_err(dev, "%s: timeout %x\n", __func__, sr);
+ return err;
+ }
+
+ /* Check SEIS is cleared (step 2.) */
+ if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
+ return -EINVAL;
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, !(sr & RNG_SR_SECS), 10,
+ 100000);
+ if (err) {
+ dev_err(dev, "%s: timeout %x\n", __func__, sr);
+ return err;
+ }
+
+end:
+ return 0;
+}
+
+/*
+ * Extracts from the STM32 RNG specification, when CONDRST is not supported
+ *
+ * When a noise source (or seed) error occurs, the RNG stops generating
+ * random numbers and sets to “1” both SEIS and SECS bits to indicate
+ * that a seed error occurred. (...)
+ *
+ * The following sequence shall be used to fully recover from a seed
+ * error after the RNG initialization:
+ * 1. Clear the SEIS bit by writing it to “0”.
+ * 2. Read out 12 words from the RNG_DR register, and discard each of
+ * them in order to clean the pipeline.
+ * 3. Confirm that SEIS is still cleared. Random number generation is
+ * back to normal.
+ */
+static int stm32_rng_conceal_seed_error_sw_reset(struct stm32_rng_private *priv)
+{
+ unsigned int i = 0;
+ u32 sr = readl_relaxed(priv->base + RNG_SR);
+
+ writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR);
+
+ for (i = 12; i != 0; i--)
+ (void)readl_relaxed(priv->base + RNG_DR);
+
+ if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_rng_conceal_seed_error(struct hwrng *rng)
+{
+ struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
+
+ dev_dbg((struct device *)priv->rng.priv, "Concealing seed error\n");
+
+ if (priv->data->has_cond_reset)
+ return stm32_rng_conceal_seed_error_cond_reset(priv);
+ else
+ return stm32_rng_conceal_seed_error_sw_reset(priv);
+};
+
+
static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct stm32_rng_private *priv =
- container_of(rng, struct stm32_rng_private, rng);
+ struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
+ unsigned int i = 0;
+ int retval = 0, err = 0;
u32 sr;
- int retval = 0;
pm_runtime_get_sync((struct device *) priv->rng.priv);
+ if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
+ stm32_rng_conceal_seed_error(rng);
+
while (max >= sizeof(u32)) {
sr = readl_relaxed(priv->base + RNG_SR);
- /* Manage timeout which is based on timer and take */
- /* care of initial delay time when enabling rng */
+ /*
+ * Manage timeout which is based on timer and take
+ * care of initial delay time when enabling the RNG.
+ */
if (!sr && wait) {
- int err;
-
err = readl_relaxed_poll_timeout_atomic(priv->base
+ RNG_SR,
sr, sr,
10, 50000);
- if (err)
+ if (err) {
dev_err((struct device *)priv->rng.priv,
"%s: timeout %x!\n", __func__, sr);
+ break;
+ }
+ } else if (!sr) {
+ /* The FIFO is being filled up */
+ break;
}
- /* If error detected or data not ready... */
if (sr != RNG_SR_DRDY) {
- if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
- "bad RNG status - %x\n", sr))
+ if (sr & RNG_SR_SEIS) {
+ err = stm32_rng_conceal_seed_error(rng);
+ i++;
+ if (err && i > RNG_NB_RECOVER_TRIES) {
+ dev_err((struct device *)priv->rng.priv,
+ "Couldn't recover from seed error\n");
+ return -ENOTRECOVERABLE;
+ }
+
+ continue;
+ }
+
+ if (WARN_ONCE((sr & RNG_SR_CEIS), "RNG clock too slow - %x\n", sr))
writel_relaxed(0, priv->base + RNG_SR);
- break;
}
+ /* Late seed error case: DR being 0 is an error status */
*(u32 *)data = readl_relaxed(priv->base + RNG_DR);
+ if (!*(u32 *)data) {
+ err = stm32_rng_conceal_seed_error(rng);
+ i++;
+ if (err && i > RNG_NB_RECOVER_TRIES) {
+ dev_err((struct device *)priv->rng.priv,
+ "Couldn't recover from seed error");
+ return -ENOTRECOVERABLE;
+ }
+ continue;
+ }
+
+ i = 0;
retval += sizeof(u32);
data += sizeof(u32);
max -= sizeof(u32);
@@ -82,54 +256,265 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return retval || !wait ? retval : -EIO;
}
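
The reworked read path above still feeds the generic hw_random core, so one way to exercise the new seed-error recovery end to end is simply to pull bytes from the character device it backs. A minimal user-space smoke test follows; it is only an illustrative sketch and assumes the STM32 RNG is the currently selected backend behind /dev/hwrng.

/* Illustrative user-space smoke test; assumes the STM32 RNG backs /dev/hwrng. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf));	/* reaches stm32_rng_read() through the hwrng core */
	printf("read %zd bytes from /dev/hwrng\n", n);
	close(fd);
	return n > 0 ? 0 : 1;
}
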
+static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
+{
+ struct stm32_rng_private *priv =
+ container_of(rng, struct stm32_rng_private, rng);
+ unsigned long clock_rate = 0;
+ uint clock_div = 0;
+
+ clock_rate = clk_get_rate(priv->clk);
+
+ /*
+ * Get the exponent to apply on the CLKDIV field in RNG_CR register
+ * No need to handle the case when clock-div > 0xF as it is physically
+ * impossible
+ */
+ while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
+ clock_div++;
+
+ pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div);
+
+ return clock_div;
+}
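
stm32_rng_clock_freq_restrain() picks the smallest power-of-two divider exponent that brings the RNG kernel clock at or below max_clock_rate. A standalone sketch of that arithmetic, with made-up rates (not driver code):

/* Standalone sketch of the CLKDIV exponent computation; the rates are made up. */
#include <stdio.h>

static unsigned int clkdiv_for(unsigned long rate, unsigned long max_rate)
{
	unsigned int div = 0;

	while ((rate >> div) > max_rate)	/* same loop as the driver */
		div++;
	return div;
}

int main(void)
{
	/* e.g. a 64 MHz input against the 48 MHz stm32mp13 limit -> div = 1, i.e. 32 MHz */
	printf("div = %u\n", clkdiv_for(64000000UL, 48000000UL));
	return 0;
}
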
+
static int stm32_rng_init(struct hwrng *rng)
{
struct stm32_rng_private *priv =
container_of(rng, struct stm32_rng_private, rng);
int err;
+ u32 reg;
err = clk_prepare_enable(priv->clk);
if (err)
return err;
- if (priv->ced)
- writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
- else
- writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
- priv->base + RNG_CR);
-
/* clear error indicators */
writel_relaxed(0, priv->base + RNG_SR);
+ reg = readl_relaxed(priv->base + RNG_CR);
+
+ /*
+ * Keep default RNG configuration if none was specified.
+ * 0 is an invalid value as it disables all entropy sources.
+ */
+ if (priv->data->has_cond_reset && priv->data->cr) {
+ uint clock_div = stm32_rng_clock_freq_restrain(rng);
+
+ reg &= ~RNG_CR_CONFIG_MASK;
+ reg |= RNG_CR_CONDRST | (priv->data->cr & RNG_CR_ENTROPY_SRC_MASK) |
+ (clock_div << RNG_CR_CLKDIV_SHIFT);
+ if (priv->ced)
+ reg &= ~RNG_CR_CED;
+ else
+ reg |= RNG_CR_CED;
+ writel_relaxed(reg, priv->base + RNG_CR);
+
+ /* Health tests and noise control registers */
+ writel_relaxed(priv->data->htcr, priv->base + RNG_HTCR);
+ writel_relaxed(priv->data->nscr & RNG_NSCR_MASK, priv->base + RNG_NSCR);
+
+ reg &= ~RNG_CR_CONDRST;
+ reg |= RNG_CR_RNGEN;
+ if (priv->lock_conf)
+ reg |= RNG_CR_CONFLOCK;
+
+ writel_relaxed(reg, priv->base + RNG_CR);
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg,
+ (!(reg & RNG_CR_CONDRST)),
+ 10, 50000);
+ if (err) {
+ clk_disable_unprepare(priv->clk);
+ dev_err((struct device *)priv->rng.priv,
+ "%s: timeout %x!\n", __func__, reg);
+ return -EINVAL;
+ }
+ } else {
+ /* Handle all RNG versions by checking if conditional reset should be set */
+ if (priv->data->has_cond_reset)
+ reg |= RNG_CR_CONDRST;
+
+ if (priv->ced)
+ reg &= ~RNG_CR_CED;
+ else
+ reg |= RNG_CR_CED;
+
+ writel_relaxed(reg, priv->base + RNG_CR);
+
+ if (priv->data->has_cond_reset)
+ reg &= ~RNG_CR_CONDRST;
+
+ reg |= RNG_CR_RNGEN;
+
+ writel_relaxed(reg, priv->base + RNG_CR);
+ }
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
+ reg & RNG_SR_DRDY,
+ 10, 100000);
+ if (err | (reg & ~RNG_SR_DRDY)) {
+ clk_disable_unprepare(priv->clk);
+ dev_err((struct device *)priv->rng.priv,
+ "%s: timeout:%x SR: %x!\n", __func__, err, reg);
+ return -EINVAL;
+ }
+
return 0;
}
-static void stm32_rng_cleanup(struct hwrng *rng)
+static int stm32_rng_remove(struct platform_device *ofdev)
{
- struct stm32_rng_private *priv =
- container_of(rng, struct stm32_rng_private, rng);
+ pm_runtime_disable(&ofdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
+{
+ struct stm32_rng_private *priv = dev_get_drvdata(dev);
+ u32 reg;
- writel_relaxed(0, priv->base + RNG_CR);
+ reg = readl_relaxed(priv->base + RNG_CR);
+ reg &= ~RNG_CR_RNGEN;
+ writel_relaxed(reg, priv->base + RNG_CR);
clk_disable_unprepare(priv->clk);
+
+ return 0;
}
+static int __maybe_unused stm32_rng_suspend(struct device *dev)
+{
+ struct stm32_rng_private *priv = dev_get_drvdata(dev);
+
+ if (priv->data->has_cond_reset) {
+ priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
+ priv->pm_conf.htcr = readl_relaxed(priv->base + RNG_HTCR);
+ }
+
+ /* Do not save that RNG is enabled as it will be handled at resume */
+ priv->pm_conf.cr = readl_relaxed(priv->base + RNG_CR) & ~RNG_CR_RNGEN;
+
+ writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
+{
+ struct stm32_rng_private *priv = dev_get_drvdata(dev);
+ int err;
+ u32 reg;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ /* Clean error indications */
+ writel_relaxed(0, priv->base + RNG_SR);
+
+ reg = readl_relaxed(priv->base + RNG_CR);
+ reg |= RNG_CR_RNGEN;
+ writel_relaxed(reg, priv->base + RNG_CR);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rng_resume(struct device *dev)
+{
+ struct stm32_rng_private *priv = dev_get_drvdata(dev);
+ int err;
+ u32 reg;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ /* Clean error indications */
+ writel_relaxed(0, priv->base + RNG_SR);
+
+ if (priv->data->has_cond_reset) {
+ /*
+ * Correct configuration in bits [29:4] must be set in the same
+ * access that set RNG_CR_CONDRST bit. Else config setting is
+ * not taken into account. CONFIGLOCK bit must also be unset but
+ * it is not handled at the moment.
+ */
+ writel_relaxed(priv->pm_conf.cr | RNG_CR_CONDRST, priv->base + RNG_CR);
+
+ writel_relaxed(priv->pm_conf.nscr, priv->base + RNG_NSCR);
+ writel_relaxed(priv->pm_conf.htcr, priv->base + RNG_HTCR);
+
+ reg = readl_relaxed(priv->base + RNG_CR);
+ reg |= RNG_CR_RNGEN;
+ reg &= ~RNG_CR_CONDRST;
+ writel_relaxed(reg, priv->base + RNG_CR);
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg,
+ reg & ~RNG_CR_CONDRST, 10, 100000);
+
+ if (err) {
+ clk_disable_unprepare(priv->clk);
+ dev_err((struct device *)priv->rng.priv,
+ "%s: timeout:%x CR: %x!\n", __func__, err, reg);
+ return -EINVAL;
+ }
+ } else {
+ reg = priv->pm_conf.cr;
+ reg |= RNG_CR_RNGEN;
+ writel_relaxed(reg, priv->base + RNG_CR);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
+ stm32_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_rng_suspend,
+ stm32_rng_resume)
+};
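
Marking the dev_pm_ops and its callbacks __maybe_unused and wrapping the reference with pm_ptr() further down lets the compiler discard the whole table when CONFIG_PM is disabled. In effect (a simplified sketch of the idiom, not the kernel macro definitions, which are built on PTR_IF()/IS_ENABLED()):

/* Simplified sketch of the pm_ptr() idiom; not the kernel macro definition. */
#ifdef CONFIG_PM
#define pm_ptr_sketch(ops)	(ops)
#else
#define pm_ptr_sketch(ops)	NULL	/* unreferenced __maybe_unused ops are then dropped */
#endif
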
+
+static const struct stm32_rng_data stm32mp13_rng_data = {
+ .has_cond_reset = true,
+ .max_clock_rate = 48000000,
+ .cr = 0x00F00D00,
+ .nscr = 0x2B5BB,
+ .htcr = 0x969D,
+};
+
+static const struct stm32_rng_data stm32_rng_data = {
+ .has_cond_reset = false,
+ .max_clock_rate = 3000000,
+};
+
+static const struct of_device_id stm32_rng_match[] = {
+ {
+ .compatible = "st,stm32mp13-rng",
+ .data = &stm32mp13_rng_data,
+ },
+ {
+ .compatible = "st,stm32-rng",
+ .data = &stm32_rng_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_rng_match);
+
static int stm32_rng_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct stm32_rng_private *priv;
- struct resource res;
- int err;
+ struct resource *res;
priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- err = of_address_to_resource(np, 0, &res);
- if (err)
- return err;
-
- priv->base = devm_ioremap_resource(dev, &res);
+ priv->base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -145,14 +530,16 @@ static int stm32_rng_probe(struct platform_device *ofdev)
}
priv->ced = of_property_read_bool(np, "clock-error-detect");
+ priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf");
+
+ priv->data = of_device_get_match_data(dev);
+ if (!priv->data)
+ return -ENODEV;
dev_set_drvdata(dev, priv);
priv->rng.name = dev_driver_string(dev);
-#ifndef CONFIG_PM
priv->rng.init = stm32_rng_init;
- priv->rng.cleanup = stm32_rng_cleanup;
-#endif
priv->rng.read = stm32_rng_read;
priv->rng.priv = (unsigned long) dev;
priv->rng.quality = 900;
@@ -164,51 +551,10 @@ static int stm32_rng_probe(struct platform_device *ofdev)
return devm_hwrng_register(dev, &priv->rng);
}
-static int stm32_rng_remove(struct platform_device *ofdev)
-{
- pm_runtime_disable(&ofdev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int stm32_rng_runtime_suspend(struct device *dev)
-{
- struct stm32_rng_private *priv = dev_get_drvdata(dev);
-
- stm32_rng_cleanup(&priv->rng);
-
- return 0;
-}
-
-static int stm32_rng_runtime_resume(struct device *dev)
-{
- struct stm32_rng_private *priv = dev_get_drvdata(dev);
-
- return stm32_rng_init(&priv->rng);
-}
-#endif
-
-static const struct dev_pm_ops stm32_rng_pm_ops = {
- SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
- stm32_rng_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
-
-
-static const struct of_device_id stm32_rng_match[] = {
- {
- .compatible = "st,stm32-rng",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_rng_match);
-
static struct platform_driver stm32_rng_driver = {
.driver = {
.name = "stm32-rng",
- .pm = &stm32_rng_pm_ops,
+ .pm = pm_ptr(&stm32_rng_pm_ops),
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 99f4e86ac3..7382724bf5 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -321,7 +321,6 @@ static int xgene_rng_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = &pdev->dev;
- platform_set_drvdata(pdev, ctx);
ctx->csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->csr_base))
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
index 2c586d1fe8..4af64f76c8 100644
--- a/drivers/char/hw_random/xiphera-trng.c
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -121,8 +121,6 @@ static int xiphera_trng_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, trng);
-
return 0;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 186f1fee75..d6f1427968 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -5377,20 +5377,15 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
j = 0;
while (*p) {
- int size = strlen(p);
+ int size = strnlen(p, 11);
- if (size > 11)
- size = 11;
data[0] = 0;
data[1] = 0;
data[2] = 0xf0; /* OEM event without timestamp. */
data[3] = intf->addrinfo[0].address;
data[4] = j++; /* sequence # */
- /*
- * Always give 11 bytes, so strncpy will fill
- * it with zeroes for me.
- */
- strncpy(data+5, p, 11);
+
+ memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
ipmi_panic_request_and_wait(intf, &addr, &msg);
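
memcpy_and_pad() copies the string fragment and zero-fills the rest of the 11-byte field in one call, which is what the old strncpy() was implicitly relied on for. A user-space equivalent, shown only to illustrate the semantics (hypothetical helper name):

/* Hypothetical user-space equivalent of memcpy_and_pad(), for illustration only. */
#include <string.h>

static void memcpy_and_pad_demo(void *dest, size_t dest_len,
				const void *src, size_t count, int pad)
{
	if (count >= dest_len) {
		memcpy(dest, src, dest_len);	/* destination full, nothing to pad */
	} else {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	}
}
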
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 870659d91d..941d2dcc8c 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -656,7 +656,6 @@ static struct ctl_table ipmi_table[] = {
.maxlen = sizeof(poweroff_powercycle),
.mode = 0644,
.proc_handler = proc_dointvec },
- { }
};
static struct ctl_table_header *ipmi_table_header;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index df8dd50b4c..1f7600c361 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1945,7 +1945,7 @@ static int new_ssif_client(int addr, char *adapter_name,
}
}
- strncpy(addr_info->binfo.type, DEVICE_NAME,
+ strscpy(addr_info->binfo.type, DEVICE_NAME,
sizeof(addr_info->binfo.type));
addr_info->binfo.addr = addr;
addr_info->binfo.platform_data = addr_info;
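
Unlike strncpy(), strscpy() always NUL-terminates the destination and reports truncation, which is why it is preferred for fixed-size ID strings such as binfo.type. A simplified sketch of its contract (not the kernel implementation; -E2BIG is spelled out as -7 here):

/* Simplified sketch of strscpy() semantics; not the kernel implementation. */
#include <string.h>

static long strscpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -7;		/* the kernel returns -E2BIG */

	len = strnlen(src, size - 1);
	memcpy(dst, src, len);
	dst[len] = '\0';		/* always terminated */

	return src[len] != '\0' ? -7 : (long)len;	/* -E2BIG when truncated */
}
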
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1052b0f2d4..3c6670cf90 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,10 +31,6 @@
#include <linux/uaccess.h>
#include <linux/security.h>
-#ifdef CONFIG_IA64
-# include <linux/efi.h>
-#endif
-
#define DEVMEM_MINOR 1
#define DEVPORT_MINOR 4
@@ -277,13 +273,6 @@ int __weak phys_mem_access_prot_allowed(struct file *file,
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
-#if defined(CONFIG_IA64)
- /*
- * On ia64, we ignore O_DSYNC because we cannot tolerate memory
- * attribute aliases.
- */
- return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#else
/*
* Accessing memory above the top the kernel knows about or through a
* file pointer
@@ -292,7 +281,6 @@ static int uncached_access(struct file *file, phys_addr_t addr)
if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory);
-#endif
}
#endif
@@ -640,6 +628,7 @@ static int open_port(struct inode *inode, struct file *filp)
#define full_lseek null_lseek
#define write_zero write_null
#define write_iter_zero write_iter_null
+#define splice_write_zero splice_write_null
#define open_mem open_port
static const struct file_operations __maybe_unused mem_fops = {
@@ -677,6 +666,8 @@ static const struct file_operations zero_fops = {
.read_iter = read_iter_zero,
.read = read_zero,
.write_iter = write_iter_zero,
+ .splice_read = copy_splice_read,
+ .splice_write = splice_write_zero,
.mmap = mmap_zero,
.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
@@ -688,6 +679,7 @@ static const struct file_operations full_fops = {
.llseek = full_lseek,
.read_iter = read_iter_zero,
.write = write_full,
+ .splice_read = copy_splice_read,
};
static const struct memdev {
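
With these hooks, /dev/zero supports splice() on both the read and write side and /dev/full gains splice() reads. A minimal user-space check (error handling trimmed; assumes a Linux host with glibc):

/* Minimal splice() check against /dev/zero; error handling trimmed. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int zero = open("/dev/zero", O_RDONLY);
	ssize_t n;

	if (zero < 0 || pipe(pipefd) < 0)
		return 1;

	/* Moves 4 KiB of zeroes into the pipe via the new .splice_read = copy_splice_read. */
	n = splice(zero, NULL, pipefd[1], NULL, 4096, 0);
	printf("spliced %zd bytes\n", n);

	close(zero);
	close(pipefd[0]);
	close(pipefd[1]);
	return n == 4096 ? 0 : 1;
}
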
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
deleted file mode 100644
index b35f651837..0000000000
--- a/drivers/char/mspec.c
+++ /dev/null
@@ -1,295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights
- * reserved.
- */
-
-/*
- * SN Platform Special Memory (mspec) Support
- *
- * This driver exports the SN special memory (mspec) facility to user
- * processes.
- * There are two types of memory made available thru this driver:
- * uncached and cached.
- *
- * Uncached are used for memory write combining feature of the ia64
- * cpu.
- *
- * Cached are used for areas of memory that are used as cached addresses
- * on our partition and used as uncached addresses from other partitions.
- * Due to a design constraint of the SN2 Shub, you can not have processors
- * on the same FSB perform both a cached and uncached reference to the
- * same cache line. These special memory cached regions prevent the
- * kernel from ever dropping in a TLB entry and therefore prevent the
- * processor from ever speculating a cache line from this page.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/numa.h>
-#include <linux/refcount.h>
-#include <asm/page.h>
-#include <linux/atomic.h>
-#include <asm/tlbflush.h>
-#include <asm/uncached.h>
-
-
-#define CACHED_ID "Cached,"
-#define UNCACHED_ID "Uncached"
-#define REVISION "4.0"
-#define MSPEC_BASENAME "mspec"
-
-/*
- * Page types allocated by the device.
- */
-enum mspec_page_type {
- MSPEC_CACHED = 2,
- MSPEC_UNCACHED
-};
-
-/*
- * One of these structures is allocated when an mspec region is mmaped. The
- * structure is pointed to by the vma->vm_private_data field in the vma struct.
- * This structure is used to record the addresses of the mspec pages.
- * This structure is shared by all vma's that are split off from the
- * original vma when split_vma()'s are done.
- *
- * The refcnt is incremented atomically because mm->mmap_lock does not
- * protect in fork case where multiple tasks share the vma_data.
- */
-struct vma_data {
- refcount_t refcnt; /* Number of vmas sharing the data. */
- spinlock_t lock; /* Serialize access to this structure. */
- int count; /* Number of pages allocated. */
- enum mspec_page_type type; /* Type of pages allocated. */
- unsigned long vm_start; /* Original (unsplit) base. */
- unsigned long vm_end; /* Original (unsplit) end. */
- unsigned long maddr[]; /* Array of MSPEC addresses. */
-};
-
-/*
- * mspec_open
- *
- * Called when a device mapping is created by a means other than mmap
- * (via fork, munmap, etc.). Increments the reference count on the
- * underlying mspec data so it is not freed prematurely.
- */
-static void
-mspec_open(struct vm_area_struct *vma)
-{
- struct vma_data *vdata;
-
- vdata = vma->vm_private_data;
- refcount_inc(&vdata->refcnt);
-}
-
-/*
- * mspec_close
- *
- * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to all the vma's sharing this vma_data structure.
- */
-static void
-mspec_close(struct vm_area_struct *vma)
-{
- struct vma_data *vdata;
- int index, last_index;
- unsigned long my_page;
-
- vdata = vma->vm_private_data;
-
- if (!refcount_dec_and_test(&vdata->refcnt))
- return;
-
- last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
- for (index = 0; index < last_index; index++) {
- if (vdata->maddr[index] == 0)
- continue;
- /*
- * Clear the page before sticking it back
- * into the pool.
- */
- my_page = vdata->maddr[index];
- vdata->maddr[index] = 0;
- memset((char *)my_page, 0, PAGE_SIZE);
- uncached_free_page(my_page, 1);
- }
-
- kvfree(vdata);
-}
-
-/*
- * mspec_fault
- *
- * Creates a mspec page and maps it to user space.
- */
-static vm_fault_t
-mspec_fault(struct vm_fault *vmf)
-{
- unsigned long paddr, maddr;
- unsigned long pfn;
- pgoff_t index = vmf->pgoff;
- struct vma_data *vdata = vmf->vma->vm_private_data;
-
- maddr = (volatile unsigned long) vdata->maddr[index];
- if (maddr == 0) {
- maddr = uncached_alloc_page(numa_node_id(), 1);
- if (maddr == 0)
- return VM_FAULT_OOM;
-
- spin_lock(&vdata->lock);
- if (vdata->maddr[index] == 0) {
- vdata->count++;
- vdata->maddr[index] = maddr;
- } else {
- uncached_free_page(maddr, 1);
- maddr = vdata->maddr[index];
- }
- spin_unlock(&vdata->lock);
- }
-
- paddr = maddr & ~__IA64_UNCACHED_OFFSET;
- pfn = paddr >> PAGE_SHIFT;
-
- return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
-}
-
-static const struct vm_operations_struct mspec_vm_ops = {
- .open = mspec_open,
- .close = mspec_close,
- .fault = mspec_fault,
-};
-
-/*
- * mspec_mmap
- *
- * Called when mmapping the device. Initializes the vma with a fault handler
- * and private data structure necessary to allocate, track, and free the
- * underlying pages.
- */
-static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma,
- enum mspec_page_type type)
-{
- struct vma_data *vdata;
- int pages, vdata_size;
-
- if (vma->vm_pgoff != 0)
- return -EINVAL;
-
- if ((vma->vm_flags & VM_SHARED) == 0)
- return -EINVAL;
-
- if ((vma->vm_flags & VM_WRITE) == 0)
- return -EPERM;
-
- pages = vma_pages(vma);
- vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
- vdata = kvzalloc(vdata_size, GFP_KERNEL);
- if (!vdata)
- return -ENOMEM;
-
- vdata->vm_start = vma->vm_start;
- vdata->vm_end = vma->vm_end;
- vdata->type = type;
- spin_lock_init(&vdata->lock);
- refcount_set(&vdata->refcnt, 1);
- vma->vm_private_data = vdata;
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
- if (vdata->type == MSPEC_UNCACHED)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &mspec_vm_ops;
-
- return 0;
-}
-
-static int
-cached_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return mspec_mmap(file, vma, MSPEC_CACHED);
-}
-
-static int
-uncached_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return mspec_mmap(file, vma, MSPEC_UNCACHED);
-}
-
-static const struct file_operations cached_fops = {
- .owner = THIS_MODULE,
- .mmap = cached_mmap,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice cached_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "mspec_cached",
- .fops = &cached_fops
-};
-
-static const struct file_operations uncached_fops = {
- .owner = THIS_MODULE,
- .mmap = uncached_mmap,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice uncached_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "mspec_uncached",
- .fops = &uncached_fops
-};
-
-/*
- * mspec_init
- *
- * Called at boot time to initialize the mspec facility.
- */
-static int __init
-mspec_init(void)
-{
- int ret;
-
- ret = misc_register(&cached_miscdev);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device %i\n",
- CACHED_ID, ret);
- return ret;
- }
- ret = misc_register(&uncached_miscdev);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device %i\n",
- UNCACHED_ID, ret);
- misc_deregister(&cached_miscdev);
- return ret;
- }
-
- printk(KERN_INFO "%s %s initialized devices: %s %s\n",
- MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID);
-
- return 0;
-}
-
-static void __exit
-mspec_exit(void)
-{
- misc_deregister(&uncached_miscdev);
- misc_deregister(&cached_miscdev);
-}
-
-module_init(mspec_init);
-module_exit(mspec_exit);
-
-MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
-MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3cb37760df..4a9c79391d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1683,7 +1683,6 @@ static struct ctl_table random_table[] = {
.mode = 0444,
.proc_handler = proc_do_uuid,
},
- { }
};
/*
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 9211531689..22d249333f 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -920,7 +920,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
}
return ret;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 680d1ef2a2..431e9e5bf9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -106,7 +106,7 @@ struct port_buffer {
unsigned int sgpages;
/* sg is used if spages > 0. sg must be the last in is struct */
- struct scatterlist sg[];
+ struct scatterlist sg[] __counted_by(sgpages);
};
/*
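
The __counted_by() annotation ties the flexible sg[] array to the runtime sgpages count, so FORTIFY_SOURCE and UBSAN can bounds-check accesses through it. The general pattern on a made-up struct, with a fallback for compilers lacking the attribute:

/* General __counted_by() pattern on a made-up struct; not the virtio_console code. */
#ifndef __counted_by
#define __counted_by(member)	/* no-op fallback for compilers without the attribute */
#endif

struct demo_buf {
	unsigned int nr_items;
	int items[] __counted_by(nr_items);	/* accesses checked against nr_items */
};
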
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f60bb61514..019cf6079c 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -84,18 +84,13 @@
#include <linux/sysctl.h>
#include <linux/fs.h>
#include <linux/cdev.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_OF
-/* For open firmware. */
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#endif
-
#include "xilinx_hwicap.h"
#include "buffer_icap.h"
#include "fifo_icap.h"
@@ -601,14 +596,14 @@ static const struct file_operations hwicap_fops = {
.llseek = noop_llseek,
};
-static int hwicap_setup(struct device *dev, int id,
- const struct resource *regs_res,
+static int hwicap_setup(struct platform_device *pdev, int id,
const struct hwicap_driver_config *config,
const struct config_registers *config_regs)
{
dev_t devt;
struct hwicap_drvdata *drvdata = NULL;
- int retval = 0;
+ struct device *dev = &pdev->dev;
+ int retval;
dev_info(dev, "Xilinx icap port driver\n");
@@ -636,72 +631,39 @@ static int hwicap_setup(struct device *dev, int id,
devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);
- drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+ drvdata = devm_kzalloc(dev, sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
retval = -ENOMEM;
- goto failed0;
+ goto failed;
}
dev_set_drvdata(dev, (void *)drvdata);
- if (!regs_res) {
- dev_err(dev, "Couldn't get registers resource\n");
- retval = -EFAULT;
- goto failed1;
- }
-
- drvdata->mem_start = regs_res->start;
- drvdata->mem_end = regs_res->end;
- drvdata->mem_size = resource_size(regs_res);
-
- if (!request_mem_region(drvdata->mem_start,
- drvdata->mem_size, DRIVER_NAME)) {
- dev_err(dev, "Couldn't lock memory region at %Lx\n",
- (unsigned long long) regs_res->start);
- retval = -EBUSY;
- goto failed1;
+ drvdata->base_address = devm_platform_ioremap_resource(pdev, 0);
+ if (!drvdata->base_address) {
+ retval = -ENODEV;
+ goto failed;
}
drvdata->devt = devt;
drvdata->dev = dev;
- drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
- if (!drvdata->base_address) {
- dev_err(dev, "ioremap() failed\n");
- retval = -ENOMEM;
- goto failed2;
- }
-
drvdata->config = config;
drvdata->config_regs = config_regs;
mutex_init(&drvdata->sem);
drvdata->is_open = 0;
- dev_info(dev, "ioremap %llx to %p with size %llx\n",
- (unsigned long long) drvdata->mem_start,
- drvdata->base_address,
- (unsigned long long) drvdata->mem_size);
-
cdev_init(&drvdata->cdev, &hwicap_fops);
drvdata->cdev.owner = THIS_MODULE;
retval = cdev_add(&drvdata->cdev, devt, 1);
if (retval) {
dev_err(dev, "cdev_add() failed\n");
- goto failed3;
+ goto failed;
}
device_create(&icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
return 0; /* success */
- failed3:
- iounmap(drvdata->base_address);
-
- failed2:
- release_mem_region(regs_res->start, drvdata->mem_size);
-
- failed1:
- kfree(drvdata);
-
- failed0:
+ failed:
mutex_lock(&icap_sem);
probed_devices[id] = 0;
mutex_unlock(&icap_sem);
@@ -723,75 +685,22 @@ static struct hwicap_driver_config fifo_icap_config = {
.reset = fifo_icap_reset,
};
-#ifdef CONFIG_OF
-static int hwicap_of_probe(struct platform_device *op,
- const struct hwicap_driver_config *config)
-{
- struct resource res;
- const unsigned int *id;
- const char *family;
- int rc;
- const struct config_registers *regs;
-
-
- rc = of_address_to_resource(op->dev.of_node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
- }
-
- id = of_get_property(op->dev.of_node, "port-number", NULL);
-
- /* It's most likely that we're using V4, if the family is not
- * specified
- */
- regs = &v4_config_registers;
- family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
-
- if (family) {
- if (!strcmp(family, "virtex2p"))
- regs = &v2_config_registers;
- else if (!strcmp(family, "virtex4"))
- regs = &v4_config_registers;
- else if (!strcmp(family, "virtex5"))
- regs = &v5_config_registers;
- else if (!strcmp(family, "virtex6"))
- regs = &v6_config_registers;
- }
- return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
- regs);
-}
-#else
-static inline int hwicap_of_probe(struct platform_device *op,
- const struct hwicap_driver_config *config)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_OF */
-
-static const struct of_device_id hwicap_of_match[];
static int hwicap_drv_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct resource *res;
const struct config_registers *regs;
+ const struct hwicap_driver_config *config;
const char *family;
+ int id = -1;
- match = of_match_device(hwicap_of_match, &pdev->dev);
- if (match)
- return hwicap_of_probe(pdev, match->data);
+ config = device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ of_property_read_u32(pdev->dev.of_node, "port-number", &id);
/* It's most likely that we're using V4, if the family is not
* specified
*/
regs = &v4_config_registers;
- family = pdev->dev.platform_data;
-
- if (family) {
+ if (!of_property_read_string(pdev->dev.of_node, "xlnx,family", &family)) {
if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
else if (!strcmp(family, "virtex4"))
@@ -801,9 +710,7 @@ static int hwicap_drv_probe(struct platform_device *pdev)
else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
}
-
- return hwicap_setup(&pdev->dev, pdev->id, res,
- &buffer_icap_config, regs);
+ return hwicap_setup(pdev, id, config, regs);
}
static void hwicap_drv_remove(struct platform_device *pdev)
@@ -815,16 +722,12 @@ static void hwicap_drv_remove(struct platform_device *pdev)
device_destroy(&icap_class, drvdata->devt);
cdev_del(&drvdata->cdev);
- iounmap(drvdata->base_address);
- release_mem_region(drvdata->mem_start, drvdata->mem_size);
- kfree(drvdata);
mutex_lock(&icap_sem);
probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
mutex_unlock(&icap_sem);
}
-#ifdef CONFIG_OF
/* Match table for device tree binding */
static const struct of_device_id hwicap_of_match[] = {
{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
@@ -832,9 +735,6 @@ static const struct of_device_id hwicap_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, hwicap_of_match);
-#else
-#define hwicap_of_match NULL
-#endif
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,